hip_filename | hip_content | cuda_filename | cuda_content |
---|---|---|---|
790c0d02cd3f28ce285a7ab23ee2201dccde30f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ann/activation.hh"
namespace zinhart
{
//host wrapper functions that launch the kernels; each wrapper calculates the block parameters and the threads per block
std::int32_t call_activation(ACTIVATION_NAME activation_name, ACTIVATION_TYPE activation_type, double * Wx_plus_b, std::uint32_t layer_size)
{
hipError_t error_id;
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, 0);
std::int32_t warp_size = properties.warpSize;
std::int32_t threads_per_block = (layer_size + warp_size - 1) / (warp_size * warp_size);
if(threads_per_block > 4 * warp_size)
threads_per_block = 4 * warp_size;
dim3 block_launch;
block_launch.x = (layer_size + threads_per_block - 1) / threads_per_block;
block_launch.y = layer_size ;//maybe
block_launch.z = 1;
//call kernel
hipLaunchKernelGGL(( activation_kernel), dim3(block_launch), dim3(threads_per_block), 0, 0, activation_name, activation_type, Wx_plus_b, layer_size);
hipDeviceSynchronize();
error_id = hipGetLastError();
if(error_id != hipSuccess)
{
std::cerr<<"activation_kernel failed to launch with error: "<<hipGetErrorString(error_id);
return 1;
}
return 0;
}
std::int32_t call_activation(ACTIVATION_NAME activation_name, ACTIVATION_TYPE activation_type, double * Wx_plus_b, double coefficient, std::uint32_t layer_size)
{
hipError_t error_id;
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, 0);
std::int32_t warp_size = properties.warpSize;
std::int32_t threads_per_block = (layer_size + warp_size - 1) / (warp_size * warp_size);
if(threads_per_block > 4 * warp_size)
threads_per_block = 4 * warp_size;
dim3 block_launch;
block_launch.x = (layer_size + threads_per_block - 1) / threads_per_block;
block_launch.y = layer_size;//maybe
block_launch.z = 1;
//call kernel
hipLaunchKernelGGL(( activation_kernel_coeff), dim3(block_launch), dim3(threads_per_block), 0, 0, activation_name, activation_type, Wx_plus_b, coefficient, layer_size);
hipDeviceSynchronize();
error_id = hipGetLastError();
if(error_id != hipSuccess)
{
std::cerr<<"activation_kernel_coeff failed to launch with error: "<<hipGetErrorString(error_id);
return 1;
}
//copy memory from host to device
return 0;
}
//activation function kernels here
__global__ void activation_kernel(ACTIVATION_NAME activation_name, ACTIVATION_TYPE activation_type, double * Wx_plus_b, std::uint32_t layer_size) //everything that's not leaky relu, elu, or softmax
{
std::uint32_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_id >= layer_size)
return;
printf("here, %d", thread_id);
switch(activation_name)
{
case ACTIVATION_NAME::SIGMOID:
Wx_plus_b[thread_id] = activation_sigmoid(activation_type, Wx_plus_b[thread_id]);
break;
case ACTIVATION_NAME::SOFTPLUS:
Wx_plus_b[thread_id] = activation_softplus(activation_type, Wx_plus_b[thread_id]);
break;
case ACTIVATION_NAME::TANH:
Wx_plus_b[thread_id] = activation_tanh(activation_type, Wx_plus_b[thread_id]);
break;
case ACTIVATION_NAME::RELU:
Wx_plus_b[thread_id] = activation_relu(activation_type, Wx_plus_b[thread_id]);
break;
default:
return;
}
}
__global__ void activation_kernel_coeff(ACTIVATION_NAME activation_name, ACTIVATION_TYPE activation_type, double * Wx_plus_b, double coefficient, std::uint32_t layer_size)//leaky relu or elu
{
std::uint32_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_id >= layer_size)
return;
switch(activation_name)
{
case ACTIVATION_NAME::LEAKY_RELU:
Wx_plus_b[thread_id] = activation_leaky_relu(activation_type, Wx_plus_b[thread_id], coefficient);
break;
case ACTIVATION_NAME::EXP_LEAKY_RELU:
Wx_plus_b[thread_id] = activation_exponential_leaky_relu(activation_type, Wx_plus_b[thread_id], coefficient);
break;
default:
return;
}
}
__global__ void activation_kernel_softmax(ACTIVATION_TYPE activation_type, double * Wx_plus_b, std::uint32_t layer_size)
{
//to do
std::uint32_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_id >= layer_size)
return;
return;
}
/*pt call_activation(Layer & L, double & input, LAYER_NAME ln, ACTIVATION f)
{
return L(input, ln, f);
}
pt call_activation(Layer & L, double & input, double & coefficient, LAYER_NAME ln, ACTIVATION f)
{ return L(input, coefficient, ln, f); }*/
}
| 790c0d02cd3f28ce285a7ab23ee2201dccde30f9.cu | #include "ann/activation.hh"
namespace zinhart
{
//host wrapper functions that launch the kernels; each wrapper calculates the block parameters and the threads per block
std::int32_t call_activation(ACTIVATION_NAME activation_name, ACTIVATION_TYPE activation_type, double * Wx_plus_b, std::uint32_t layer_size)
{
cudaError_t error_id;
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, 0);
std::int32_t warp_size = properties.warpSize;
std::int32_t threads_per_block = (layer_size + warp_size - 1) / (warp_size * warp_size);
if(threads_per_block > 4 * warp_size)
threads_per_block = 4 * warp_size;
dim3 block_launch;
block_launch.x = (layer_size + threads_per_block - 1) / threads_per_block;
block_launch.y = layer_size ;//maybe
block_launch.z = 1;
//call kernel
activation_kernel<<<block_launch, threads_per_block>>>(activation_name, activation_type, Wx_plus_b, layer_size);
cudaDeviceSynchronize();
error_id = cudaGetLastError();
if(error_id != cudaSuccess)
{
std::cerr<<"activation_kernel failed to launch with error: "<<cudaGetErrorString(error_id);
return 1;
}
return 0;
}
std::int32_t call_activation(ACTIVATION_NAME activation_name, ACTIVATION_TYPE activation_type, double * Wx_plus_b, double coefficient, std::uint32_t layer_size)
{
cudaError_t error_id;
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, 0);
std::int32_t warp_size = properties.warpSize;
std::int32_t threads_per_block = (layer_size + warp_size - 1) / (warp_size * warp_size);
if(threads_per_block > 4 * warp_size)
threads_per_block = 4 * warp_size;
dim3 block_launch;
block_launch.x = (layer_size + threads_per_block - 1) / threads_per_block;
block_launch.y = layer_size;//maybe
block_launch.z = 1;
//call kernel
activation_kernel_coeff<<<block_launch, threads_per_block>>>(activation_name, activation_type, Wx_plus_b, coefficient, layer_size);
cudaDeviceSynchronize();
error_id = cudaGetLastError();
if(error_id != cudaSuccess)
{
std::cerr<<"activation_kernel_coeff failed to launch with error: "<<cudaGetErrorString(error_id);
return 1;
}
//copy memory from host to device
return 0;
}
//activation function kernels here
__global__ void activation_kernel(ACTIVATION_NAME activation_name, ACTIVATION_TYPE activation_type, double * Wx_plus_b, std::uint32_t layer_size) //everything that's not leaky relu, elu, or softmax
{
std::uint32_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_id >= layer_size)
return;
printf("here, %d", thread_id);
switch(activation_name)
{
case ACTIVATION_NAME::SIGMOID:
Wx_plus_b[thread_id] = activation_sigmoid(activation_type, Wx_plus_b[thread_id]);
break;
case ACTIVATION_NAME::SOFTPLUS:
Wx_plus_b[thread_id] = activation_softplus(activation_type, Wx_plus_b[thread_id]);
break;
case ACTIVATION_NAME::TANH:
Wx_plus_b[thread_id] = activation_tanh(activation_type, Wx_plus_b[thread_id]);
break;
case ACTIVATION_NAME::RELU:
Wx_plus_b[thread_id] = activation_relu(activation_type, Wx_plus_b[thread_id]);
break;
default:
return;
}
}
__global__ void activation_kernel_coeff(ACTIVATION_NAME activation_name, ACTIVATION_TYPE activation_type, double * Wx_plus_b, double coefficient, std::uint32_t layer_size)//leaky relu or elu
{
std::uint32_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_id >= layer_size)
return;
switch(activation_name)
{
case ACTIVATION_NAME::LEAKY_RELU:
Wx_plus_b[thread_id] = activation_leaky_relu(activation_type, Wx_plus_b[thread_id], coefficient);
break;
case ACTIVATION_NAME::EXP_LEAKY_RELU:
Wx_plus_b[thread_id] = activation_exponential_leaky_relu(activation_type, Wx_plus_b[thread_id], coefficient);
break;
default:
return;
}
}
__global__ void activation_kernel_softmax(ACTIVATION_TYPE activation_type, double * Wx_plus_b, std::uint32_t layer_size)
{
//to do
std::uint32_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if(thread_id >= layer_size)
return;
return;
}
/*pt call_activation(Layer & L, double & input, LAYER_NAME ln, ACTIVATION f)
{
return L(input, ln, f);
}
pt call_activation(Layer & L, double & input, double & coefficient, LAYER_NAME ln, ACTIVATION f)
{ return L(input, coefficient, ln, f); }*/
}
|
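The pair above shows the core hipify translation for this file: the `activation_kernel<<<block_launch, threads_per_block>>>(...)` launches in the `.cu` source become `hipLaunchKernelGGL((activation_kernel), dim3(...), dim3(...), 0, 0, ...)` in the `.hip` output, and the `cuda*` runtime calls (`cudaGetDeviceProperties`, `cudaDeviceSynchronize`, `cudaGetLastError`) are renamed to their `hip*` counterparts. The sketch below is a minimal, self-contained CUDA example of the same launch-and-check pattern; the kernel name `scale_kernel`, the fixed 256-thread block, and the ceil-division grid calculation are illustrative assumptions, not taken from the files above.

```cuda
#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel: scales each element of a device vector by a constant.
__global__ void scale_kernel(double* data, double factor, unsigned int n)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)   // guard against the partial last block
        return;
    data[i] *= factor;
}

// Host wrapper: conventional ceil-division launch configuration plus error check.
int call_scale(double* d_data, double factor, unsigned int n)
{
    const int threads_per_block = 256;
    const int blocks = (n + threads_per_block - 1) / threads_per_block;

    scale_kernel<<<blocks, threads_per_block>>>(d_data, factor, n);
    // hipify would rewrite the launch above as:
    // hipLaunchKernelGGL((scale_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
    //                    d_data, factor, n);

    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        std::fprintf(stderr, "scale_kernel failed to launch with error: %s\n",
                     cudaGetErrorString(err));
        return 1;
    }
    return 0;
}

int main()
{
    const unsigned int n = 1 << 10;
    double* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(double));
    cudaMemset(d_data, 0, n * sizeof(double));
    int rc = call_scale(d_data, 2.0, n);
    cudaFree(d_data);
    return rc;
}
```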
718237e850d0b170f51f38ca4ad38e00426e574d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "convolutionFFT2D_common.h"
#include "convolutionFFT2D.cuh"
////////////////////////////////////////////////////////////////////////////////
/// Position convolution kernel center at (0, 0) in the image
////////////////////////////////////////////////////////////////////////////////
extern "C" void padKernel(
float *d_Dst,
float *d_Src,
int fftH,
int fftW,
int kernelH,
int kernelW,
int kernelY,
int kernelX,
hipStream_t* devStream
)
{
assert(d_Src != d_Dst);
dim3 threads(32, 8);
dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y));
SET_FLOAT_BASE;
if (devStream == NULL)
padKernel_kernel << <grid, threads >> >(
d_Dst,
d_Src,
fftH,
fftW,
kernelH,
kernelW,
kernelY,
kernelX
);
else
padKernel_kernel << <grid, threads, 0, *devStream >> >(
d_Dst,
d_Src,
fftH,
fftW,
kernelH,
kernelW,
kernelY,
kernelX
);
getLastCudaError("padKernel_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Prepare data for "pad to border" addressing mode
////////////////////////////////////////////////////////////////////////////////
extern "C" void padDataClampToBorder(
float *d_Dst,
float *d_Src,
int fftH,
int fftW,
int dataH,
int dataW,
int kernelW,
int kernelH,
int kernelY,
int kernelX,
hipStream_t* devStream
)
{
assert(d_Src != d_Dst);
dim3 threads(32, 8);
dim3 grid(iDivUp(fftW, threads.x), iDivUp(fftH, threads.y));
SET_FLOAT_BASE;
if (devStream == NULL)
padDataClampToBorder_kernel << <grid, threads >> >(
d_Dst,
d_Src,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX
);
else
padDataClampToBorder_kernel << <grid, threads, 0, *devStream >> >(
d_Dst,
d_Src,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX
);
getLastCudaError("padDataClampToBorder_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Modulate Fourier image of padded data by Fourier image of padded kernel
// and normalize by FFT size
////////////////////////////////////////////////////////////////////////////////
extern "C" void modulateAndNormalize(
fComplex *d_Dst,
fComplex *d_Src,
int fftH,
int fftW,
int padding,
hipStream_t* devStream
)
{
assert(fftW % 2 == 0);
const int dataSize = fftH * (fftW / 2 + padding);
if (devStream==NULL)
modulateAndNormalize_kernel << <iDivUp(dataSize, 256), 256 >> >(
d_Dst,
d_Src,
dataSize,
1.0f / (float)(fftW *fftH)
);
else
modulateAndNormalize_kernel << <iDivUp(dataSize, 256), 256, 0, *devStream >> >(
d_Dst,
d_Src,
dataSize,
1.0f / (float)(fftW *fftH)
);
getLastCudaError("modulateAndNormalize() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// 2D R2C / C2R post/preprocessing kernels
////////////////////////////////////////////////////////////////////////////////
static const double PI = 3.1415926535897932384626433832795;
static const uint BLOCKDIM = 256;
extern "C" void spPostprocess2D(
void *d_Dst,
void *d_Src,
uint DY,
uint DX,
uint padding,
int dir
)
{
assert(d_Src != d_Dst);
assert(DX % 2 == 0);
#if(POWER_OF_TWO)
uint log2DX, log2DY;
uint factorizationRemX = factorRadix2(log2DX, DX);
uint factorizationRemY = factorRadix2(log2DY, DY);
assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif
const uint threadCount = DY * (DX / 2);
const double phaseBase = dir * PI / (double)DX;
SET_FCOMPLEX_BASE;
hipLaunchKernelGGL(( spPostprocess2D_kernel), dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0,
(fComplex *)d_Dst,
(fComplex *)d_Src,
DY, DX, threadCount, padding,
(float)phaseBase
);
getLastCudaError("spPostprocess2D_kernel<<<>>> execution failed\n");
}
extern "C" void spPreprocess2D(
void *d_Dst,
void *d_Src,
uint DY,
uint DX,
uint padding,
int dir
)
{
assert(d_Src != d_Dst);
assert(DX % 2 == 0);
#if(POWER_OF_TWO)
uint log2DX, log2DY;
uint factorizationRemX = factorRadix2(log2DX, DX);
uint factorizationRemY = factorRadix2(log2DY, DY);
assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif
const uint threadCount = DY * (DX / 2);
const double phaseBase = -dir * PI / (double)DX;
SET_FCOMPLEX_BASE;
hipLaunchKernelGGL(( spPreprocess2D_kernel), dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0,
(fComplex *)d_Dst,
(fComplex *)d_Src,
DY, DX, threadCount, padding,
(float)phaseBase
);
getLastCudaError("spPreprocess2D_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Combined spPostprocess2D + modulateAndNormalize + spPreprocess2D
////////////////////////////////////////////////////////////////////////////////
extern "C" void spProcess2D(
void *d_Dst,
void *d_SrcA,
void *d_SrcB,
uint DY,
uint DX,
int dir
)
{
assert(DY % 2 == 0);
#if(POWER_OF_TWO)
uint log2DX, log2DY;
uint factorizationRemX = factorRadix2(log2DX, DX);
uint factorizationRemY = factorRadix2(log2DY, DY);
assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif
const uint threadCount = (DY / 2) * DX;
const double phaseBase = dir * PI / (double)DX;
SET_FCOMPLEX_BASE_A;
SET_FCOMPLEX_BASE_B;
hipLaunchKernelGGL(( spProcess2D_kernel), dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0,
(fComplex *)d_Dst,
(fComplex *)d_SrcA,
(fComplex *)d_SrcB,
DY, DX, threadCount,
(float)phaseBase,
0.5f / (float)(DY *DX)
);
getLastCudaError("spProcess2D_kernel<<<>>> execution failed\n");
}
| 718237e850d0b170f51f38ca4ad38e00426e574d.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "convolutionFFT2D_common.h"
#include "convolutionFFT2D.cuh"
////////////////////////////////////////////////////////////////////////////////
/// Position convolution kernel center at (0, 0) in the image
////////////////////////////////////////////////////////////////////////////////
extern "C" void padKernel(
float *d_Dst,
float *d_Src,
int fftH,
int fftW,
int kernelH,
int kernelW,
int kernelY,
int kernelX,
cudaStream_t* devStream
)
{
assert(d_Src != d_Dst);
dim3 threads(32, 8);
dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y));
SET_FLOAT_BASE;
if (devStream == NULL)
padKernel_kernel << <grid, threads >> >(
d_Dst,
d_Src,
fftH,
fftW,
kernelH,
kernelW,
kernelY,
kernelX
);
else
padKernel_kernel << <grid, threads, 0, *devStream >> >(
d_Dst,
d_Src,
fftH,
fftW,
kernelH,
kernelW,
kernelY,
kernelX
);
getLastCudaError("padKernel_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Prepare data for "pad to border" addressing mode
////////////////////////////////////////////////////////////////////////////////
extern "C" void padDataClampToBorder(
float *d_Dst,
float *d_Src,
int fftH,
int fftW,
int dataH,
int dataW,
int kernelW,
int kernelH,
int kernelY,
int kernelX,
cudaStream_t* devStream
)
{
assert(d_Src != d_Dst);
dim3 threads(32, 8);
dim3 grid(iDivUp(fftW, threads.x), iDivUp(fftH, threads.y));
SET_FLOAT_BASE;
if (devStream == NULL)
padDataClampToBorder_kernel << <grid, threads >> >(
d_Dst,
d_Src,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX
);
else
padDataClampToBorder_kernel << <grid, threads, 0, *devStream >> >(
d_Dst,
d_Src,
fftH,
fftW,
dataH,
dataW,
kernelH,
kernelW,
kernelY,
kernelX
);
getLastCudaError("padDataClampToBorder_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Modulate Fourier image of padded data by Fourier image of padded kernel
// and normalize by FFT size
////////////////////////////////////////////////////////////////////////////////
extern "C" void modulateAndNormalize(
fComplex *d_Dst,
fComplex *d_Src,
int fftH,
int fftW,
int padding,
cudaStream_t* devStream
)
{
assert(fftW % 2 == 0);
const int dataSize = fftH * (fftW / 2 + padding);
if (devStream==NULL)
modulateAndNormalize_kernel << <iDivUp(dataSize, 256), 256 >> >(
d_Dst,
d_Src,
dataSize,
1.0f / (float)(fftW *fftH)
);
else
modulateAndNormalize_kernel << <iDivUp(dataSize, 256), 256, 0, *devStream >> >(
d_Dst,
d_Src,
dataSize,
1.0f / (float)(fftW *fftH)
);
getLastCudaError("modulateAndNormalize() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// 2D R2C / C2R post/preprocessing kernels
////////////////////////////////////////////////////////////////////////////////
static const double PI = 3.1415926535897932384626433832795;
static const uint BLOCKDIM = 256;
extern "C" void spPostprocess2D(
void *d_Dst,
void *d_Src,
uint DY,
uint DX,
uint padding,
int dir
)
{
assert(d_Src != d_Dst);
assert(DX % 2 == 0);
#if(POWER_OF_TWO)
uint log2DX, log2DY;
uint factorizationRemX = factorRadix2(log2DX, DX);
uint factorizationRemY = factorRadix2(log2DY, DY);
assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif
const uint threadCount = DY * (DX / 2);
const double phaseBase = dir * PI / (double)DX;
SET_FCOMPLEX_BASE;
spPostprocess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>(
(fComplex *)d_Dst,
(fComplex *)d_Src,
DY, DX, threadCount, padding,
(float)phaseBase
);
getLastCudaError("spPostprocess2D_kernel<<<>>> execution failed\n");
}
extern "C" void spPreprocess2D(
void *d_Dst,
void *d_Src,
uint DY,
uint DX,
uint padding,
int dir
)
{
assert(d_Src != d_Dst);
assert(DX % 2 == 0);
#if(POWER_OF_TWO)
uint log2DX, log2DY;
uint factorizationRemX = factorRadix2(log2DX, DX);
uint factorizationRemY = factorRadix2(log2DY, DY);
assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif
const uint threadCount = DY * (DX / 2);
const double phaseBase = -dir * PI / (double)DX;
SET_FCOMPLEX_BASE;
spPreprocess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>(
(fComplex *)d_Dst,
(fComplex *)d_Src,
DY, DX, threadCount, padding,
(float)phaseBase
);
getLastCudaError("spPreprocess2D_kernel<<<>>> execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Combined spPostprocess2D + modulateAndNormalize + spPreprocess2D
////////////////////////////////////////////////////////////////////////////////
extern "C" void spProcess2D(
void *d_Dst,
void *d_SrcA,
void *d_SrcB,
uint DY,
uint DX,
int dir
)
{
assert(DY % 2 == 0);
#if(POWER_OF_TWO)
uint log2DX, log2DY;
uint factorizationRemX = factorRadix2(log2DX, DX);
uint factorizationRemY = factorRadix2(log2DY, DY);
assert(factorizationRemX == 1 && factorizationRemY == 1);
#endif
const uint threadCount = (DY / 2) * DX;
const double phaseBase = dir * PI / (double)DX;
SET_FCOMPLEX_BASE_A;
SET_FCOMPLEX_BASE_B;
spProcess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>(
(fComplex *)d_Dst,
(fComplex *)d_SrcA,
(fComplex *)d_SrcB,
DY, DX, threadCount,
(float)phaseBase,
0.5f / (float)(DY *DX)
);
getLastCudaError("spProcess2D_kernel<<<>>> execution failed\n");
}
|
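Each wrapper in the pair above follows the same dispatch pattern: compute a grid with the sample's `iDivUp` helper, launch on the default stream when `devStream` is `NULL` and on `*devStream` otherwise, then check the launch with `getLastCudaError`. The sketch below isolates that pattern with a hypothetical `fill_kernel`; the local `iDivUp` definition and the kernel are illustrative stand-ins rather than code from the sample, and the error check uses the plain runtime API to stay self-contained.

```cuda
#include <cassert>
#include <cstdio>
#include <cuda_runtime.h>

// Ceil-division helper, equivalent in spirit to the sample's iDivUp().
static int iDivUp(int a, int b) { return (a + b - 1) / b; }

// Hypothetical elementwise kernel standing in for padKernel_kernel and friends.
__global__ void fill_kernel(float* dst, float value, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        dst[i] = value;
}

// Stream-optional launch: default stream when devStream is nullptr,
// otherwise the caller-provided stream.
extern "C" void fill(float* d_dst, float value, int n, cudaStream_t* devStream)
{
    assert(d_dst != nullptr);
    dim3 threads(256);
    dim3 grid(iDivUp(n, threads.x));

    if (devStream == nullptr)
        fill_kernel<<<grid, threads>>>(d_dst, value, n);
    else
        fill_kernel<<<grid, threads, 0, *devStream>>>(d_dst, value, n);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        std::fprintf(stderr, "fill_kernel launch failed: %s\n", cudaGetErrorString(err));
}
```

Passing the stream by pointer lets callers opt into asynchronous launches on their own stream without changing the wrapper's signature for the synchronous, default-stream case, which is also why hipify can keep these wrapper bodies almost unchanged apart from the `cudaStream_t` to `hipStream_t` rename.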
a6f9f7fdff867c366d3fa0c55c82241a9fe2416c.hip | // !!! This is a file automatically generated by hipify!!!
/*
For DIRECTED GRAPH
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <string>
#include <algorithm>
/***all macros**/
#define MAX_NODE 100000000
#define DEBUG 1
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**all type declaration***/
using namespace std;
class Node{
public:
unsigned int val;
vector<unsigned int> weights;
vector<Node*> Edges;
Node(int val){
this->val = val;
}
void addEdge(Node* v,unsigned int w){
this->Edges.push_back(v);
this->weights.push_back(w);
}
};
/***function declarations***/
void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c);
void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph,
int* diffOff,int* diffEdges,unsigned int* diffWeight );
void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int* rev_offset,int* rev_edges,int& del_size);
void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int& E,
int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size,
int* mOffset,int* mEdges,unsigned int* mWeight);
void check_del_path(int u, int v,vector<int> Path, bool& flag);
void check_cycle(int N,int* parent);
void computeTime(float& time,hipEvent_t start, hipEvent_t stop);
/**** device Code *******/
__device__ volatile int Cx[MAX_NODE];
__device__ volatile int PQ[MAX_NODE];
//K in parallel
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
__global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag,int* PQ_size,
int flagDiff,int* diff_off,int* diff_edge,int* diff_weight,int dE ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop until the lock is acquired
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
//diff expand
if(flagDiff){
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start<end){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
//end diff
}//end
}
//K in parallel -- O(N)
__global__ void keepHeapPQ(int* PQ_size,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
if(nextFlag[id]==1){
int index = atomicAdd(nvSize,1);
nextV[index]=id;
}
}
}
//for K in parallel
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
__global__ void checkMIN(int* PQ_size,int* flagEnd,int dest,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0 ){
int front = id* ( (N+K-1)/K );
int node = PQ[front];
//if at least one PQ front has a smaller cost than dest, don't end the A*
if( Cx[node] < Cx[dest] ){
atomicAnd(flagEnd,0);
}
}
}
__global__ void propogateDel(int* delEdgesV,int delEdge,int* rev_offset,int* rev_edges,unsigned int* rev_weight,int N,int E,
int* Hx,int* parent,int* parent_old,int* lock,int* addFlag){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<delEdge){
int node = delEdgesV[id];
//check for the parent and add to nextflag and update the cost
int start = rev_offset[node];
int end = E;
if(node!=N-1)
end = rev_offset[node+1];
//no parent yet
//write to parent; always read from parent_old
parent[node] = -1;
Cx[node]=INT_MAX;
addFlag[node]=1;
//if any parent can change the cost
while(start< end){
int p = rev_edges[start];
//del edges
if(p<0){
start++;
continue;
}
int weight = rev_weight[start];
bool flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no lock needed: each affected node appears only once in the array, so only one thread updates it
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[node] > (Cx[p]-Hx[p])+weight+Hx[node] ){
Cx[node] = (Cx[p]-Hx[p] )+weight+Hx[node];
parent[node] = p;
}
start++;
}
}
}
//add inserted edges to propogate
__global__ void propogateAdd(int* diff_off, int* diff_edges,unsigned int* diff_W,int* Hx,int* addFlag,
int* lock, int* parent, int* parent_old, int N, int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
int node = id;
int start = diff_off[node];
int end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edges[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop until the lock is acquired
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
bool flag_cycle = false;
int ancestor = node;
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
/*
if(flag_cycle){
printf("Add %d->%d,%d:%d::%d\n",node,child,Cx[node],Cx[child],(Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]);
if(Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]){
int ancestor = node;
while(ancestor > 0){
if(ancestor==child){
printf("%d:%d\n",ancestor,Cx[ancestor]);
break;
}
printf("%d:%d::%d ",ancestor,Cx[ancestor],parent[ancestor]);
ancestor = parent_old[ancestor];
}
}
}*/
if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
parent[child] = node;
__threadfence();
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
//propogate the change
__global__ void propogate(int* nodes, int* size, int* off, int* edge,unsigned int* W,int* Hx,
int N,int E, int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,unsigned int* diff_W,int dE,
int* rev_offset,int* rev_edges,unsigned int* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,unsigned int* rev_diff_weight){
int id = blockIdx.x*blockDim.x+threadIdx.x;
// printf("Entering %d\n",id);
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool flag_cycle_insert = false;
int optimal_parent = node;
while(optimal_parent > 0){
if(optimal_parent == child){
flag_cycle_insert = true;
break;
}
optimal_parent = parent_old[optimal_parent];
}
if(!flag_cycle_insert){
//array L initialized with 0
//get the lock for child to update C(x)
//loop until the lock is acquired
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
//critical section
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0){
rstart++;
continue;
}
int weight = rev_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
//newly added backedges
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool flag_cycle_insert = false;
int optimal_parent = node;
while(optimal_parent > 0){
if(optimal_parent == child){
flag_cycle_insert = true;
break;
}
optimal_parent = parent_old[optimal_parent];
}
if(!flag_cycle_insert){
//array L initialized with 0
//get the lock for child to update C(x)
//loop until the lock is acquired
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}else
if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0){
rstart++;
continue;
}
int weight = rev_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child)
flag_cycle = true;
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
}
start++;
}
}
}
//do in 1 thread
__global__ void insertDest(int* PQ_size, int dest,int* openList){
int id = 0;
int front = 0;
if(openList[dest]==-1){
PQ[front+PQ_size[id]]= dest;
PQ_size[id]+=1;
//add in openList
openList[dest] = id;
if(PQ_size[id]>1){
int index = PQ_size[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
__global__ void getCx(int dest,int* val){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id==0){
*val = Cx[dest];
}
}
/**** main function ****/
int main(){
//the K PQ
int K ;
scanf("%d\n",&K);
int startNode,endNode;
scanf("%d %d",&startNode,&endNode);
FILE* fgraph = fopen("graph.txt","r");
FILE* fgraph_rev = fopen("graph_op.txt","r");
int N,E;
fscanf(fgraph_rev,"%d %d\n",&N,&E);
fscanf(fgraph,"%d %d\n",&N,&E);
int* H_offset = (int*)malloc(sizeof(int)*N);
int* H_edges = (int*)malloc(sizeof(int)*E);
unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E);
int* H_hx = (int*)malloc(sizeof(int)*N);
int* H_cx = (int*)malloc(sizeof(int)*N);
int* H_parent = (int*)malloc(sizeof(int)*N);
int* H_PQ = (int*)malloc(sizeof(int)*N);
int* H_openList = (int*)malloc(sizeof(int)*N);
int* H_PQ_size = (int*)malloc(sizeof(int)*K);
//for reverse graph
int* H_rev_edges = (int*)malloc(sizeof(int)*E);
int* H_rev_offset = (int*)malloc(sizeof(int)*N);
unsigned int* H_rev_weight = (unsigned int*)malloc(sizeof(unsigned int)*E);
//for cost of endNode
int* H_dest_cost = (int*)malloc(sizeof(int));
memset(H_PQ_size,0,sizeof(int)*K);
memset(H_openList,-1,sizeof(int)*N);
//init cx
for(int i=0;i<N;i++){
H_cx[i]=INT_MAX;
H_parent[i]=-1;
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%d",&H_edges[i]);
fscanf(fgraph_rev,"%d",&H_rev_edges[i]);
}
for(int i=0;i<N;i++){
fscanf(fgraph,"%d",&H_offset[i]);
fscanf(fgraph_rev,"%d",&H_rev_offset[i]);
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%u",&H_weight[i]);
fscanf(fgraph_rev,"%u",&H_rev_weight[i]);
}
FILE* fhx = fopen("Hx.txt","r");
for(int i=0;i<N;i++){
int temp;
fscanf(fhx,"%d",&temp);
if(temp!=-1)
H_hx[i]= temp;
else
H_hx[i] = 0; //to change
}
fclose(fgraph);
fclose(fhx);
fclose(fgraph_rev);
printf("[INFO] completed taking input\n");
//init Host var
int* H_flagEnd = (int*)malloc(sizeof(int));
int* H_flagfound = (int*)malloc(sizeof(int));
int* H_a0 = (int*)malloc(sizeof(int));
int* H_nV_size = (int*)malloc(sizeof(int));
//required because many threads may try to add the same node
int* H_nVFlag = (int*)malloc(sizeof(int)*N);
memset(H_nVFlag,-1,sizeof(int)*N);
*H_flagEnd = 0;
*H_flagfound = 0;
*H_a0 = 0;
//insert startNode in PQ[0]
H_cx[startNode]=H_hx[startNode];
H_PQ[0]=startNode;
H_PQ_size[0]=1;
H_openList[startNode]=0;
//create events to record runtime
float run_time = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//graph struture
int* D_offset;
int* D_edges ;
unsigned int* D_weight;
int* D_hx;
int* D_parent;
//for reading the ancestor, to avoid locking for write-after-read
int* D_parent_old;
//Priority queue size
int* D_PQ_size;
//flag if in openList(contains which PQ)
int* D_openList;
//lock for nodes
int* D_lock;
//Diff structure
int* D_diff_edges;
int* D_diff_offset;
unsigned int* D_diff_weight;
//reverse graph
int* D_rev_edges;
int* D_rev_offset;
unsigned int* D_rev_weight;
//reverse diff
int* D_rev_diff_offset;
int* D_rev_diff_edges;
unsigned int* D_rev_diff_weight;
//next nodes flag
int* D_nVFlag;
//next nodes array to insert PQ
int* D_nV;
int* D_nV_size;
//nodes to be expanded ( extracted from PQ )
int* D_expandNodes;
int* D_expandNodes_size;
//flag to end while loop and found the destination
int* D_flagEnd;
int* D_flagfound;
//cost of endNode
int* D_dest_cost;
//list of nodes v of deleted edges u->v
int* D_delEdgesV;
gpuErrchk ( hipMalloc(&D_offset,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&D_edges,sizeof(int)*E) );
gpuErrchk ( hipMalloc(&D_weight,sizeof(unsigned int)*E) );
gpuErrchk ( hipMalloc(&D_hx,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&D_parent,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&D_parent_old,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&D_PQ_size,sizeof(int)*K) );
gpuErrchk ( hipMalloc(&D_openList,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&D_lock,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&D_dest_cost,sizeof(int)) );
//rev graph
gpuErrchk ( hipMalloc(&D_rev_edges,sizeof(int)*E) );
gpuErrchk ( hipMalloc(&D_rev_offset,sizeof(int)*N ) );
gpuErrchk ( hipMalloc(&D_rev_weight,sizeof(unsigned int)*E) );
//for next set of vertices to add in PQ
gpuErrchk ( hipMalloc(&D_nV,sizeof(int)*N) );
gpuErrchk ( hipMalloc(&D_nV_size,sizeof(int)) );
gpuErrchk ( hipMalloc(&D_nVFlag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( hipMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( hipMalloc(&D_expandNodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( hipMalloc(&D_flagEnd,sizeof(int)) );
gpuErrchk( hipMalloc(&D_flagfound,sizeof(int)) );
gpuErrchk ( hipMemcpy(D_offset,H_offset,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_edges,H_edges,sizeof(int)*E,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_hx,H_hx,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_parent,H_parent,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_openList,H_openList,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpyToSymbol(Cx,H_cx, sizeof(int)*N, 0, hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_flagEnd,H_flagEnd,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_flagfound,H_flagfound,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) );
//reverse graph
gpuErrchk ( hipMemcpy(D_rev_offset,H_rev_offset,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_rev_weight,H_rev_weight,sizeof(unsigned int)*E,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemset(D_lock,0,sizeof(int)*N) );
int flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
int numThreads = 512;
int numBlocks = (K+numThreads-1)/numThreads;
int N_numBlocks = (N+numThreads-1)/numThreads;
if(DEBUG)
printf("[INFO] A* started\n");
hipEventRecord(start);
//do A* initially on the whole graph
while(*H_flagEnd==0 && flag_PQ_not_empty==1){
//extract min
hipLaunchKernelGGL(( extractMin), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipLaunchKernelGGL(( A_star_expand), dim3(numBlocks),dim3(numThreads), 0, 0, D_offset,D_edges,D_weight,D_hx,D_parent,
D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList,
N,E,K,endNode,D_nVFlag,D_PQ_size,
false,D_diff_offset,D_diff_edges,D_diff_offset,0);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipLaunchKernelGGL(( keepHeapPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,N,K);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
//gen from flag D_nV
//for N in parallel
hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipLaunchKernelGGL(( insertPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,D_nV,D_nV_size,K,N,D_openList);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
//cpy flagend and flagEmpty
gpuErrchk( hipMemcpy(H_flagfound,D_flagfound, sizeof(int),hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,hipMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins
if( *H_flagfound==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( hipMemcpy(D_flagEnd,H_flagfound,sizeof(int),hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( checkMIN), dim3(numBlocks),dim3(numThreads) , 0, 0, D_PQ_size,D_flagEnd,endNode,N,K);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
gpuErrchk( hipMemcpy(H_flagEnd,D_flagEnd, sizeof(int),hipMemcpyDeviceToHost) );
}
}
hipLaunchKernelGGL(( getCx), dim3(1),dim3(1), 0, 0, endNode,D_dest_cost);
gpuErrchk( hipMemcpy(H_dest_cost,D_dest_cost, sizeof(int),hipMemcpyDeviceToHost) );
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) );
vector<int> Path;
printf("[OUT] Cost: %d\n",*H_dest_cost);
printf("[OUT] Path(in reverse): ");
if(*H_dest_cost!=INT_MAX){
int p = endNode;
while(H_parent[p]!=-1){
printf("%d ",p);
Path.push_back(p);
p = H_parent[p];
}
Path.push_back(p);
printf("%d\n",p);
}
else{
printf("not found\n");
}
//reverse the path to get from source to end
reverse(Path.begin(),Path.end());
//
// check_cycle(N,H_parent);
///////////////////////////////////////////////
// A star complete //
FILE* fdiff = fopen("Updates.txt","r");
int line;
int update_count = 0;
while(fscanf(fdiff,"%d\n",&line)!=EOF){
//list of nodes v of deleted edges u->v
int* H_delEdgesV = (int*)malloc(sizeof(int)*E);
gpuErrchk ( hipMalloc(&D_delEdgesV,sizeof(int)*E) );
unordered_map<unsigned int,Node*> Graph;
unordered_map<unsigned int,Node*> rev_Graph;
bool flag_do_a_star = false;
int insertEdge=0, delEdge=0;
int delEdgesV_size = 0; //v whose cost can change due to deletion
for(int i=0;i<line;i++){
int flag;
int u,v;
unsigned int w;
fscanf(fdiff,"%d %d %d %u\n",&flag,&u,&v,&w);
if(flag==1){
insertDiff(Graph,u,v,w);
insertDiff(rev_Graph,v,u,w);
insertEdge++;
}
else if(flag==0){
//check if the deleted edge is on the optimal path
check_del_path(u,v,Path,flag_do_a_star);
//passed delEdge by address
removeDelEdges(u,v,H_offset,H_edges,N,E,H_rev_offset,H_rev_edges,delEdge);
//add to list only if its cost changes due to this deletion
if(H_parent[v]==u){
H_delEdgesV[delEdgesV_size]=v;
delEdgesV_size++;
}
// delEdge++;
}
}
// insertEdge is the insertion size
//for diff
int* H_diff_edges = (int*)malloc(sizeof(int)*insertEdge);
int* H_diff_offset = (int*)malloc(sizeof(int)*N);
unsigned int* H_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge);
//diff for revrse graph
int* H_rev_diff_edges = (int*)malloc(sizeof(int)*insertEdge);
int* H_rev_diff_offset = (int*)malloc(sizeof(int)*N);
unsigned int* H_rev_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge);
//diff csr
gpuErrchk ( hipMalloc(&D_diff_edges,sizeof(int)*insertEdge) );
gpuErrchk ( hipMalloc(&D_diff_offset,sizeof(int)*(N+1) ) ); //N+1 offsets for the CSR layout
gpuErrchk ( hipMalloc(&D_diff_weight,sizeof(unsigned int)*insertEdge) );
//rev diff graph
gpuErrchk ( hipMalloc(&D_rev_diff_edges,sizeof(int)*insertEdge) );
gpuErrchk ( hipMalloc(&D_rev_diff_offset,sizeof(int)*(N+1) ) );
gpuErrchk ( hipMalloc(&D_rev_diff_weight,sizeof(unsigned int)*insertEdge) );
//reset offsets to 0, i.e. no nodes
memset(H_diff_offset,0,sizeof(int)*N);
memset(H_rev_diff_offset,0,sizeof(int)*N);
if(1)
printf("[INFO](%d) insertion:%d, deletion:%d, delaff:%d\n",update_count,insertEdge,delEdge,delEdgesV_size);
createDiffGraph(N,Graph,H_diff_offset,H_diff_edges,H_diff_weight);
createDiffGraph(N,rev_Graph,H_rev_diff_offset,H_rev_diff_edges,H_rev_diff_weight);
//TODO free the graphs
//deleted edges
gpuErrchk ( hipMemcpy(D_edges,H_edges,sizeof(int)*E,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_delEdgesV,H_delEdgesV,sizeof(int)*E,hipMemcpyHostToDevice) );
//diff graph
gpuErrchk ( hipMemcpy(D_diff_edges,H_diff_edges,sizeof(int)*insertEdge,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_diff_offset,H_diff_offset,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_diff_weight,H_diff_weight,sizeof(unsigned int)*insertEdge,hipMemcpyHostToDevice) );
//rev diff graph
gpuErrchk ( hipMemcpy(D_rev_diff_edges,H_rev_diff_edges,sizeof(int)*insertEdge,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_rev_diff_offset,H_rev_diff_offset,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_rev_diff_weight,H_rev_diff_weight,sizeof(unsigned int)*insertEdge,hipMemcpyHostToDevice) );
//reset D_nV flag
gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) );
//add del
if(delEdgesV_size>0){
if(DEBUG)
printf("[INFO] Starting computing cost for deletions\n");
//old parent to check cycle
gpuErrchk( hipMemcpy(D_parent_old,D_parent,sizeof(int)*N,hipMemcpyDeviceToDevice) );
int numBlocks_del = ( delEdgesV_size + numThreads -1)/numThreads;
hipEventRecord(start);
hipLaunchKernelGGL(( propogateDel), dim3(numBlocks_del),dim3(numThreads), 0, 0, D_delEdgesV,delEdgesV_size,D_rev_offset,D_rev_edges,D_rev_weight,N,E,
D_hx,D_parent,D_parent_old,D_lock,D_nVFlag);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
}
//
// gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) );
// check_cycle(N,H_parent);
if(DEBUG)
printf("[INFO] starting computing cost for inserions\n");
gpuErrchk( hipMemcpy(D_parent_old,D_parent,sizeof(int)*N,hipMemcpyDeviceToDevice) );
hipEventRecord(start);
//N parallel
hipLaunchKernelGGL(( propogateAdd), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_diff_offset, D_diff_edges,D_diff_weight,D_hx,D_nVFlag,
D_lock,D_parent,D_parent_old,N,insertEdge);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
//
// gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) );
// check_cycle(N,H_parent);
gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) );
//gen from flag D_nV
hipEventRecord(start);
hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
//copy back
gpuErrchk( hipMemcpy(H_nV_size,D_nV_size, sizeof(int),hipMemcpyDeviceToHost) );
//reset nV flags
gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) );
if(DEBUG)
printf("[INFO] starting propogation\n");
while(*H_nV_size > 0){
numBlocks = (*H_nV_size+numThreads-1)/numThreads;
//old parent to check cycle and remove locking on parent
gpuErrchk( hipMemcpy(D_parent_old,D_parent,sizeof(int)*N,hipMemcpyDeviceToDevice) );
//printf("[INFO] update size:%d\n",*H_nV_size);
hipEventRecord(start);
hipLaunchKernelGGL(( propogate), dim3(numBlocks),dim3(numThreads), 0, 0, D_nV,D_nV_size,D_offset,D_edges,D_weight,D_hx,
N,E,D_lock,D_parent,D_parent_old,D_nVFlag,
D_diff_offset,D_diff_edges,D_diff_weight,insertEdge,
D_rev_offset,D_rev_edges,D_rev_weight,
D_rev_diff_offset,D_rev_diff_edges,D_rev_diff_weight);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
//reset size=0
gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) );
//gen from flag D_nV
hipEventRecord(start);
hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
//copy back
gpuErrchk( hipMemcpy(H_nV_size,D_nV_size, sizeof(int),hipMemcpyDeviceToHost) );
//reset nV flags
gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) );
}
if(DEBUG)
printf("[INFO] updating priority queue\n");
//propogate complete do normal A*
numBlocks = (K+numThreads-1)/numThreads;
//update PQ after propogate
hipEventRecord(start);
hipLaunchKernelGGL(( keepHeapPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,N,K);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
//check if any node in the PQ has a cost less than dest
*H_flagEnd = 1;
gpuErrchk( hipMemcpy(D_flagEnd,H_flagEnd,sizeof(int),hipMemcpyHostToDevice) );
hipEventRecord(start);
hipLaunchKernelGGL(( checkMIN), dim3(numBlocks),dim3(numThreads) , 0, 0, D_PQ_size,D_flagEnd,endNode,N,K);
gpuErrchk( hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
gpuErrchk( hipMemcpy(H_flagEnd,D_flagEnd, sizeof(int),hipMemcpyDeviceToHost) );
//here flagEnd==0 (set above) means some node in the PQ still has a lower cost than dest
if(*H_flagEnd==0 && flag_do_a_star){
printf("[INFO] doing a* after propogation\n");
hipEventRecord(start);
hipLaunchKernelGGL(( insertDest), dim3(1),dim3(1), 0, 0, D_PQ_size,endNode,D_openList);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//reset flags
*H_flagEnd = 0;
*H_flagfound = 0;
gpuErrchk ( hipMemcpy(D_flagfound,H_flagfound,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) );
//re-run A* on the updated graph
while(*H_flagEnd==0 && flag_PQ_not_empty==1){
//extract min
hipEventRecord(start);
hipLaunchKernelGGL(( extractMin), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
hipEventRecord(start);
hipLaunchKernelGGL(( A_star_expand), dim3(numBlocks),dim3(numThreads), 0, 0, D_offset,D_edges,D_weight,D_hx,D_parent,
D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList,
N,E,K,endNode,D_nVFlag,D_PQ_size,
true,D_diff_offset,D_diff_edges,D_diff_offset,insertEdge);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
hipEventRecord(start);
hipLaunchKernelGGL(( keepHeapPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,N,K);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
//gen from flag D_nV
//for N in parallel
hipEventRecord(start);
hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
hipEventRecord(start);
hipLaunchKernelGGL(( insertPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,D_nV,D_nV_size,K,N,D_openList);
gpuErrchk(hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
//cpy flagend and flagEmpty
gpuErrchk( hipMemcpy(H_flagfound,D_flagfound, sizeof(int),hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,hipMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins
if( *H_flagfound==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( hipMemcpy(D_flagEnd,H_flagfound,sizeof(int),hipMemcpyHostToDevice) );
hipEventRecord(start);
hipLaunchKernelGGL(( checkMIN), dim3(numBlocks),dim3(numThreads) , 0, 0, D_PQ_size,D_flagEnd,endNode,N,K);
gpuErrchk( hipPeekAtLastError() );
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
gpuErrchk( hipMemcpy(H_flagEnd,D_flagEnd, sizeof(int),hipMemcpyDeviceToHost) );
// printf("\ninside MIN\n");
}
}
}
hipEventRecord(start);
hipLaunchKernelGGL(( getCx), dim3(1),dim3(1), 0, 0, endNode,D_dest_cost);
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
computeTime(run_time,start,stop);
gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) );
// found or not found based on Cx
gpuErrchk( hipMemcpy(H_dest_cost,D_dest_cost, sizeof(int),hipMemcpyDeviceToHost) );
//remove old path
Path.clear();
printf("[OUT] Cost: %d\n",*H_dest_cost);
printf("[OUT] Path(in reverse): ");
if(*H_dest_cost!=INT_MAX){
int p = endNode;
while(H_parent[p]!=-1){
printf("%d ",p);
Path.push_back(p);
p = H_parent[p];
}
Path.push_back(p);
printf("%d\n",p);
}
else{
printf("not found\n");
}
//reverse the path to get from source to end
reverse(Path.begin(),Path.end());
//merge graph
int* H_offset_new,*H_edges_new;
unsigned int* H_weight_new;
int E_new = E + insertEdge - delEdge;
H_offset_new = (int*)malloc(sizeof(int)*N);
H_edges_new = (int*)malloc(sizeof(int)*E_new);
H_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new);
mergeDiff(H_offset,H_edges,H_weight,N,E,
H_diff_offset,H_diff_edges,H_diff_weight,insertEdge,delEdge,
H_offset_new,H_edges_new,H_weight_new);
//free pointer
free(H_offset);
free(H_edges);
free(H_weight);
free(H_diff_offset);
free(H_diff_edges);
free(H_diff_weight);
H_offset = H_offset_new;
H_edges = H_edges_new;
H_weight = H_weight_new;
//hipFree and cpy
hipFree(D_edges);
hipFree(D_weight);
hipFree(D_diff_edges);
hipFree(D_diff_offset);
hipFree(D_diff_weight);
gpuErrchk ( hipMalloc(&D_edges,sizeof(int)*E_new) );
gpuErrchk ( hipMalloc(&D_weight,sizeof(unsigned int)*E_new) );
gpuErrchk ( hipMemcpy(D_offset,H_offset,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_edges,H_edges,sizeof(int)*E_new,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_weight,H_weight,sizeof(unsigned int)*E_new,hipMemcpyHostToDevice) );
//merge rev graph
int* H_rev_offset_new,*H_rev_edges_new;
unsigned int* H_rev_weight_new;
H_rev_offset_new = (int*)malloc(sizeof(int)*N);
H_rev_edges_new = (int*)malloc(sizeof(int)*E_new);
H_rev_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new);
mergeDiff(H_rev_offset,H_rev_edges,H_rev_weight,N,E,
H_rev_diff_offset,H_rev_diff_edges,H_rev_diff_weight,insertEdge,delEdge,
H_rev_offset_new,H_rev_edges_new,H_rev_weight_new);
free(H_rev_offset);
free(H_rev_edges);
free(H_rev_weight);
free(H_rev_diff_offset);
free(H_rev_diff_edges);
free(H_rev_diff_weight);
H_rev_offset = H_rev_offset_new;
H_rev_edges = H_rev_edges_new;
H_rev_weight = H_rev_weight_new;
//cuda free and cpy
hipFree(D_rev_edges);
hipFree(D_rev_weight);
hipFree(D_rev_diff_edges);
hipFree(D_rev_diff_offset);
hipFree(D_rev_diff_weight);
gpuErrchk ( hipMalloc(&D_rev_edges,sizeof(int)*E_new) );
gpuErrchk ( hipMalloc(&D_rev_weight,sizeof(unsigned int)*E_new) );
gpuErrchk ( hipMemcpy(D_rev_offset,H_rev_offset,sizeof(int)*N,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E_new,hipMemcpyHostToDevice) );
gpuErrchk ( hipMemcpy(D_rev_weight,H_rev_weight,sizeof(unsigned int)*E_new,hipMemcpyHostToDevice) );
//change E
E = E_new;
hipFree(D_delEdgesV);
free(H_delEdgesV);
//inc
update_count++;
}
printf("[INFO] update count: %d\n",update_count);
printf("[INFO] RUNTIME: %f\n",run_time);
//cuda free
// free everything
}
void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c){
unordered_map<unsigned int,Node*>:: iterator itr;
itr = Graph.find(a);
if(itr!=Graph.end()){
Node* n = itr->second;
unordered_map<unsigned int,Node*>:: iterator it;
it = Graph.find(b);
if(it!=Graph.end()){
Node* v = it->second;
n->addEdge(v,c);
}
else{
Node* v = new Node(b);
n->addEdge(v,c);
Graph.insert(pair<unsigned int,Node*>(b,v));
}
}
else{
Node* n =new Node(a);
Graph.insert(pair<unsigned int,Node*>(a,n));
unordered_map<unsigned int,Node*>:: iterator it;
it = Graph.find(b);
if(it!=Graph.end()){
Node* v = it->second;
n->addEdge(v,c);
}
else{
Node* v = new Node(b);
n->addEdge(v,c);
Graph.insert(pair<unsigned int,Node*>(b,v));
}
}
}
void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph,
int* diffOff,int* diffEdges,unsigned int* diffWeight ){
int offindex = 0;
diffOff[offindex] = 0;
offindex++;
int k =0;
int weightCount = 0;
for(int i=0;i<N;i++){
unordered_map<unsigned int,Node*>:: iterator itr;
itr = Graph.find(i);
if(itr!=Graph.end()){
Node* n = itr->second;
for(int j=0;j<n->Edges.size();j++){
diffEdges[k] = n->Edges[j]->val;
k++;
}
for(int j=0;j<n->weights.size();j++){
diffWeight[weightCount] = n->weights[j];
weightCount++;
}
if(offindex < N ){
diffOff[offindex] = k;
offindex++;
}
}
else{
if(offindex < N ){
diffOff[offindex] = k;
offindex++;
}
}
}
}
void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int* rev_offset,int* rev_edges,int& del_size){
int start = offset[u];
int end = E;
bool flag_done = false;
bool flag_done_rev = false;
if(u!=N-1)
end = offset[u+1];
while(start<end){
if( v == edges[start]){
edges[start]=-1;
flag_done = true;
break;
}
start++;
}
start = rev_offset[v];
end = E;
if(v!=N-1)
end = rev_offset[v+1];
while(start < end){
if(u == rev_edges[start]){
rev_edges[start] = -1;
flag_done_rev = true;
break;
}
start++;
}
if(flag_done && flag_done_rev)
del_size++;
if( (flag_done && !flag_done_rev)|| (!flag_done && flag_done_rev) )
printf("[ERROR] edge present in front ot back graph\n");
}
void check_del_path(int u, int v,vector<int> Path, bool& flag){
vector<int> :: iterator itr;
itr = find(Path.begin(),Path.end(),u);
//guard: u may be the last node on the path, so never dereference past the end
if(itr!=Path.end() && (itr+1)!=Path.end()){
if(*(itr+1) == v)
flag = true;
}
}
void check_cycle(int N,int* parent){
int flag = 0;
for(int i=0;i<N;i++){
vector<int> visited(N,0);
int ancestor = parent[i];
while(ancestor > 0){
if(visited[ancestor]==1){
printf("cycle at: %d, %d\n",i,ancestor);
flag =1;
break;
}
visited[ancestor]=1;
ancestor = parent[ancestor];
}
}
if(flag==0)
printf("no cycle\n");
}
void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int& E,
int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size,
int* mOffset,int* mEdges,unsigned int* mWeight){
mOffset[0] = 0;
int edegOffset= 0;
for(int i=0;i<N;i++){
int start = offset[i];
int end = E;
if(i!=N-1)
end = offset[i+1];
int count = 0;
while(start<end){
int child = edges[start];
if(child!=-1){
mEdges[edegOffset+count] = child;
mWeight[edegOffset+count] = weight[start];
count++;
}
start++;
}
start = diff_offset[i];
end = insert_size;
if(i!=N-1)
end = diff_offset[i+1];
while(start<end){
int child = diff_edges[start];
if(child!=-1){
mEdges[edegOffset+count] = child;
mWeight[edegOffset+count]= diff_weight[start];
count++;
}
start++;
}
edegOffset+=count;
if(i!=N-1)
mOffset[i+1]=edegOffset;
}
}
void computeTime(float& time,hipEvent_t start, hipEvent_t stop){
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
time+= milliseconds;
//printf("[INFO] run time: %f, %f\n",time,milliseconds);
} | a6f9f7fdff867c366d3fa0c55c82241a9fe2416c.cu | /*
For DIRECTED GRAPH
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <string>
#include <algorithm>
/***all macros**/
#define MAX_NODE 100000000
#define DEBUG 1
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**all type declaration***/
using namespace std;
class Node{
public:
unsigned int val;
vector<unsigned int> weights;
vector<Node*> Edges;
Node(int val){
this->val = val;
}
void addEdge(Node* v,unsigned int w){
this->Edges.push_back(v);
this->weights.push_back(w);
}
};
/***function declarations***/
void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c);
void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph,
int* diffOff,int* diffEdges,unsigned int* diffWeight );
void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int* rev_offset,int* rev_edges,int& del_size);
void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int& E,
int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size,
int* mOffset,int* mEdges,unsigned int* mWeight);
void check_del_path(int u, int v,vector<int> Path, bool& flag);
void check_cycle(int N,int* parent);
void computeTime(float& time,cudaEvent_t start, cudaEvent_t stop);
/**** device Code *******/
__device__ volatile int Cx[MAX_NODE];
__device__ volatile int PQ[MAX_NODE];
//K in parallel
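// extractMin: each of the K partitions pops its root (minimum Cx) node from PQ,
// re-heapifies that partition, removes the node from openList and appends it to expandNodes.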
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
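// A_star_expand: one thread per extracted node; relaxes its outgoing edges (and the diff-CSR
// edges when flagDiff is set) under a per-child spin lock, updating Cx/parent and flagging
// improved children that are not in openList (nVFlag) for later insertion.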
__global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag,int* PQ_size,
int flagDiff,int* diff_off,int* diff_edge,int* diff_weight,int dE ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//lock array initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
//diff expand
if(flagDiff){
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start<end){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//lock array initialized with 0
//get the lock for child to update C(x)
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
//end diff
}//end
}
//K in parallel -- O(N)
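// keepHeapPQ: restores the min-heap property of each PQ partition after Cx values changed.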
__global__ void keepHeapPQ(int* PQ_size,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
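// setNV: compacts vertices flagged in nextFlag into the nextV array using an atomic counter.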
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
if(nextFlag[id]==1){
int index = atomicAdd(nvSize,1);
nextV[index]=id;
}
}
}
//for K in parallel
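// insertPQ: distributes the nextV nodes round-robin over the K queues, heap-inserting each
// one and recording its queue id in openList.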
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
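// checkMIN: clears flagEnd if any queue root is still cheaper than the current cost of dest.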
__global__ void checkMIN(int* PQ_size,int* flagEnd,int dest,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0 ){
int front = id* ( (N+K-1)/K );
int node = PQ[front];
//check if at least one min remains, don't end the A*
if( Cx[node] < Cx[dest] ){
atomicAnd(flagEnd,0);
}
}
}
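// propogateDel: for every target v of a deleted edge, reset Cx[v]/parent[v] and recompute them
// from the remaining in-edges (reverse CSR), skipping parents that would create a cycle.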
__global__ void propogateDel(int* delEdgesV,int delEdge,int* rev_offset,int* rev_edges,unsigned int* rev_weight,int N,int E,
int* Hx,int* parent,int* parent_old,int* lock,int* addFlag){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<delEdge){
int node = delEdgesV[id];
//check for the parent and add to nextflag and update the cost
int start = rev_offset[node];
int end = E;
if(node!=N-1)
end = rev_offset[node+1];
//no parent
// write to parent; always read from parent_old
parent[node] = -1;
Cx[node]=INT_MAX;
addFlag[node]=1;
//if any parent can change the cost
while(start< end){
int p = rev_edges[start];
//del edges
if(p<0){
start++;
continue;
}
int weight = rev_weight[start];
int flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no lock needed: each affected node appears only once in delEdgesV, so a single thread writes it
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[node] > (Cx[p]-Hx[p])+weight+Hx[node] ){
Cx[node] = (Cx[p]-Hx[p] )+weight+Hx[node];
parent[node] = p;
}
start++;
}
}
}
//add inserted edges to propagate
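// propogateAdd: one thread per vertex relaxes its newly inserted out-edges from the diff CSR,
// guarding each child with a lock and rejecting updates that would create a parent cycle.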
__global__ void propogateAdd(int* diff_off, int* diff_edges,unsigned int* diff_W,int* Hx,int* addFlag,
int* lock, int* parent, int* parent_old, int N, int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
int node = id;
int start = diff_off[node];
int end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edges[start];
//deleted edges
if(child<0){
start++;
continue;
}
//lock array initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
bool flag_cycle = false;
int ancestor = node;
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
/*
if(flag_cycle){
printf("Add %d->%d,%d:%d::%d\n",node,child,Cx[node],Cx[child],(Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]);
if(Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]){
int ancestor = node;
while(ancestor > 0){
if(ancestor==child){
printf("%d:%d\n",ancestor,Cx[ancestor]);
break;
}
printf("%d:%d::%d ",ancestor,Cx[ancestor],parent[ancestor]);
ancestor = parent_old[ancestor];
}
}
}*/
if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
parent[child] = node;
__threadfence();
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
//propagate the change
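// propogate: for each affected node, re-relax its original and diff out-edges; when a child
// loses its optimal parent, its cost is rebuilt from all in-edges (original + diff reverse CSR).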
__global__ void propogate(int* nodes, int* size, int* off, int* edge,unsigned int* W,int* Hx,
int N,int E, int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,unsigned int* diff_W,int dE,
int* rev_offset,int* rev_edges,unsigned int* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,unsigned int* rev_diff_weight){
int id = blockIdx.x*blockDim.x+threadIdx.x;
// printf("Entering %d\n",id);
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool flag_cycle_insert = false;
int optimal_parent = node;
while(optimal_parent > 0){
if(optimal_parent == child){
flag_cycle_insert = true;
break;
}
optimal_parent = parent_old[optimal_parent];
}
if(!flag_cycle_insert){
//lock array initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
//critical section
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0){
rstart++;
continue;
}
int weight = rev_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
//newly added backedges
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool flag_cycle_insert = false;
int optimal_parent = node;
while(optimal_parent > 0){
if(optimal_parent == child){
flag_cycle_insert = true;
break;
}
optimal_parent = parent_old[optimal_parent];
}
if(!flag_cycle_insert){
//lock array initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}else
if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0){
rstart++;
continue;
}
int weight = rev_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child)
flag_cycle = true;
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
}
start++;
}
}
}
//do in 1 thread
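// insertDest: pushes the destination into queue 0 if it is not already in any open list.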
__global__ void insertDest(int* PQ_size, int dest,int* openList){
int id = 0;
int front = 0;
if(openList[dest]==-1){
PQ[front+PQ_size[id]]= dest;
PQ_size[id]+=1;
//add in openList
openList[dest] = id;
if(PQ_size[id]>1){
int index = PQ_size[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
__global__ void getCx(int dest,int* val){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id==0){
*val = Cx[dest];
}
}
/**** main function ****/
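// Flow: read forward/reverse CSR and heuristic, run parallel A* once, then for each update
// batch apply deletions/insertions, propagate cost changes, optionally continue A*, and merge
// the diff CSR into the base CSR before the next batch.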
int main(){
//the K PQ
int K ;
scanf("%d\n",&K);
int startNode,endNode;
scanf("%d %d",&startNode,&endNode);
FILE* fgraph = fopen("graph.txt","r");
FILE* fgraph_rev = fopen("graph_op.txt","r");
int N,E;
fscanf(fgraph_rev,"%d %d\n",&N,&E);
fscanf(fgraph,"%d %d\n",&N,&E);
int* H_offset = (int*)malloc(sizeof(int)*N);
int* H_edges = (int*)malloc(sizeof(int)*E);
unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E);
int* H_hx = (int*)malloc(sizeof(int)*N);
int* H_cx = (int*)malloc(sizeof(int)*N);
int* H_parent = (int*)malloc(sizeof(int)*N);
int* H_PQ = (int*)malloc(sizeof(int)*N);
int* H_openList = (int*)malloc(sizeof(int)*N);
int* H_PQ_size = (int*)malloc(sizeof(int)*K);
//for reverse graph
int* H_rev_edges = (int*)malloc(sizeof(int)*E);
int* H_rev_offset = (int*)malloc(sizeof(int)*N);
unsigned int* H_rev_weight = (unsigned int*)malloc(sizeof(unsigned int)*E);
//for cost of endNode
int* H_dest_cost = (int*)malloc(sizeof(int));
memset(H_PQ_size,0,sizeof(int)*K);
memset(H_openList,-1,sizeof(int)*N);
//init cx
for(int i=0;i<N;i++){
H_cx[i]=INT_MAX;
H_parent[i]=-1;
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%d",&H_edges[i]);
fscanf(fgraph_rev,"%d",&H_rev_edges[i]);
}
for(int i=0;i<N;i++){
fscanf(fgraph,"%d",&H_offset[i]);
fscanf(fgraph_rev,"%d",&H_rev_offset[i]);
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%u",&H_weight[i]);
fscanf(fgraph_rev,"%u",&H_rev_weight[i]);
}
FILE* fhx = fopen("Hx.txt","r");
for(int i=0;i<N;i++){
int temp;
fscanf(fhx,"%d",&temp);
if(temp!=-1)
H_hx[i]= temp;
else
H_hx[i] = 0; //to change
}
fclose(fgraph);
fclose(fhx);
fclose(fgraph_rev);
printf("[INFO] completed taking input\n");
//init Host var
int* H_flagEnd = (int*)malloc(sizeof(int));
int* H_flagfound = (int*)malloc(sizeof(int));
int* H_a0 = (int*)malloc(sizeof(int));
int* H_nV_size = (int*)malloc(sizeof(int));
//required because many threads may try to add the same node; the flag ensures it is inserted only once
int* H_nVFlag = (int*)malloc(sizeof(int)*N);
memset(H_nVFlag,-1,sizeof(int)*N);
*H_flagEnd = 0;
*H_flagfound = 0;
*H_a0 = 0;
//insert startNode in PQ[0]
H_cx[startNode]=H_hx[startNode];
H_PQ[0]=startNode;
H_PQ_size[0]=1;
H_openList[startNode]=0;
//create events to record runtime
float run_time = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//graph struture
int* D_offset;
int* D_edges ;
unsigned int* D_weight;
int* D_hx;
int* D_parent;
//for reading the ancestor chain, avoiding a lock on write-after-read.
int* D_parent_old;
//Priority queue size
int* D_PQ_size;
//flag if in openList(contains which PQ)
int* D_openList;
//lock for nodes
int* D_lock;
//Diff structure
int* D_diff_edges;
int* D_diff_offset;
unsigned int* D_diff_weight;
//reverse graph
int* D_rev_edges;
int* D_rev_offset;
unsigned int* D_rev_weight;
//reverse diff
int* D_rev_diff_offset;
int* D_rev_diff_edges;
unsigned int* D_rev_diff_weight;
//next nodes flag
int* D_nVFlag;
//next nodes array to insert PQ
int* D_nV;
int* D_nV_size;
//nodes to be expanded ( extracted from PQ )
int* D_expandNodes;
int* D_expandNodes_size;
//flag to end while loop and found the destination
int* D_flagEnd;
int* D_flagfound;
//cost of endNode
int* D_dest_cost;
//list of nodes v of deleted edges u->v
int* D_delEdgesV;
gpuErrchk ( cudaMalloc(&D_offset,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E) );
gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E) );
gpuErrchk ( cudaMalloc(&D_hx,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_parent,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_parent_old,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_PQ_size,sizeof(int)*K) );
gpuErrchk ( cudaMalloc(&D_openList,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_lock,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_dest_cost,sizeof(int)) );
//rev graph
gpuErrchk ( cudaMalloc(&D_rev_edges,sizeof(int)*E) );
gpuErrchk ( cudaMalloc(&D_rev_offset,sizeof(int)*N ) );
gpuErrchk ( cudaMalloc(&D_rev_weight,sizeof(unsigned int)*E) );
//for next set of vertices to add in PQ
gpuErrchk ( cudaMalloc(&D_nV,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_nV_size,sizeof(int)) );
gpuErrchk ( cudaMalloc(&D_nVFlag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( cudaMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( cudaMalloc(&D_expandNodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( cudaMalloc(&D_flagEnd,sizeof(int)) );
gpuErrchk( cudaMalloc(&D_flagfound,sizeof(int)) );
gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_hx,H_hx,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpyToSymbol(Cx,H_cx, sizeof(int)*N, 0, cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
//reverse graph
gpuErrchk ( cudaMemcpy(D_rev_offset,H_rev_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_rev_weight,H_rev_weight,sizeof(unsigned int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemset(D_lock,0,sizeof(int)*N) );
int flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
int numThreads = 512;
int numBlocks = (K+numThreads-1)/numThreads;
int N_numBlocks = (N+numThreads-1)/numThreads;
if(DEBUG)
printf("[INFO] A* started\n");
cudaEventRecord(start);
//DO A* initially on whole graph
while(*H_flagEnd==0 && flag_PQ_not_empty==1){
//extract min
extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent,
D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList,
N,E,K,endNode,D_nVFlag,D_PQ_size,
false,D_diff_offset,D_diff_edges,D_diff_offset,0);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//gen from flag D_nV
//for N in parallel
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,K,N,D_openList);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//cpy flagend and flagEmpty
gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins
if( *H_flagfound==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,endNode,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) );
}
}
getCx<<<1,1>>>(endNode,D_dest_cost);
gpuErrchk( cudaMemcpy(H_dest_cost,D_dest_cost, sizeof(int),cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
vector<int> Path;
printf("[OUT] Cost: %d\n",*H_dest_cost);
printf("[OUT] Path(in reverse): ");
if(*H_dest_cost!=INT_MAX){
int p = endNode;
while(H_parent[p]!=-1){
printf("%d ",p);
Path.push_back(p);
p = H_parent[p];
}
Path.push_back(p);
printf("%d\n",p);
}
else{
printf("not found\n");
}
//reverse the path to get from source to end
reverse(Path.begin(),Path.end());
//
// check_cycle(N,H_parent);
///////////////////////////////////////////////
// A star complete //
FILE* fdiff = fopen("Updates.txt","r");
int line;
int update_count = 0;
while(fscanf(fdiff,"%d\n",&line)!=EOF){
//list of nodes v of deleted edges u->v
int* H_delEdgesV = (int*)malloc(sizeof(int)*E);
gpuErrchk ( cudaMalloc(&D_delEdgesV,sizeof(int)*E) );
unordered_map<unsigned int,Node*> Graph;
unordered_map<unsigned int,Node*> rev_Graph;
bool flag_do_a_star = false;
int insertEdge=0, delEdge=0;
int delEdgesV_size = 0; //v whose cost can change due to deletion
for(int i=0;i<line;i++){
int flag;
int u,v;
unsigned int w;
fscanf(fdiff,"%d %d %d %u\n",&flag,&u,&v,&w);
if(flag==1){
insertDiff(Graph,u,v,w);
insertDiff(rev_Graph,v,u,w);
insertEdge++;
}
else if(flag==0){
//check if deleted edge lies on the optimal path.
check_del_path(u,v,Path,flag_do_a_star);
//passed delEdge by address
removeDelEdges(u,v,H_offset,H_edges,N,E,H_rev_offset,H_rev_edges,delEdge);
//add to list only if its cost changes due to this deletion
if(H_parent[v]==u){
H_delEdgesV[delEdgesV_size]=v;
delEdgesV_size++;
}
// delEdge++;
}
}
// insertEdge is the insertion size
//for diff
int* H_diff_edges = (int*)malloc(sizeof(int)*insertEdge);
int* H_diff_offset = (int*)malloc(sizeof(int)*N);
unsigned int* H_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge);
//diff for revrse graph
int* H_rev_diff_edges = (int*)malloc(sizeof(int)*insertEdge);
int* H_rev_diff_offset = (int*)malloc(sizeof(int)*N);
unsigned int* H_rev_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge);
//diff csr
gpuErrchk ( cudaMalloc(&D_diff_edges,sizeof(int)*insertEdge) );
gpuErrchk ( cudaMalloc(&D_diff_offset,sizeof(int)*(N+1) ) ); //coz
gpuErrchk ( cudaMalloc(&D_diff_weight,sizeof(unsigned int)*insertEdge) );
//rev diff graph
gpuErrchk ( cudaMalloc(&D_rev_diff_edges,sizeof(int)*insertEdge) );
gpuErrchk ( cudaMalloc(&D_rev_diff_offset,sizeof(int)*(N+1) ) );
gpuErrchk ( cudaMalloc(&D_rev_diff_weight,sizeof(unsigned int)*insertEdge) );
//reset offset to 0 ..ie no nodes
memset(H_diff_offset,0,sizeof(int)*N);
memset(H_rev_diff_offset,0,sizeof(int)*N);
if(1)
printf("[INFO](%d) insertion:%d, deletion:%d, delaff:%d\n",update_count,insertEdge,delEdge,delEdgesV_size);
createDiffGraph(N,Graph,H_diff_offset,H_diff_edges,H_diff_weight);
createDiffGraph(N,rev_Graph,H_rev_diff_offset,H_rev_diff_edges,H_rev_diff_weight);
//TODO free the graphs
//deleted edges
gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_delEdgesV,H_delEdgesV,sizeof(int)*E,cudaMemcpyHostToDevice) );
//diff graph
gpuErrchk ( cudaMemcpy(D_diff_edges,H_diff_edges,sizeof(int)*insertEdge,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_diff_offset,H_diff_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_diff_weight,H_diff_weight,sizeof(unsigned int)*insertEdge,cudaMemcpyHostToDevice) );
//rev diff graph
gpuErrchk ( cudaMemcpy(D_rev_diff_edges,H_rev_diff_edges,sizeof(int)*insertEdge,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_rev_diff_offset,H_rev_diff_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_rev_diff_weight,H_rev_diff_weight,sizeof(unsigned int)*insertEdge,cudaMemcpyHostToDevice) );
//reset D_nV flag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//add del
if(delEdgesV_size>0){
if(DEBUG)
printf("[INFO] Starting computing cost for deletions\n");
//old parent to check cycle
gpuErrchk( cudaMemcpy(D_parent_old,D_parent,sizeof(int)*N,cudaMemcpyDeviceToDevice) );
int numBlocks_del = ( delEdgesV_size + numThreads -1)/numThreads;
cudaEventRecord(start);
propogateDel<<<numBlocks_del,numThreads>>>(D_delEdgesV,delEdgesV_size,D_rev_offset,D_rev_edges,D_rev_weight,N,E,
D_hx,D_parent,D_parent_old,D_lock,D_nVFlag);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
}
//
// gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
// check_cycle(N,H_parent);
if(DEBUG)
printf("[INFO] starting computing cost for inserions\n");
gpuErrchk( cudaMemcpy(D_parent_old,D_parent,sizeof(int)*N,cudaMemcpyDeviceToDevice) );
cudaEventRecord(start);
//N parallel
propogateAdd<<<N_numBlocks,numThreads>>>(D_diff_offset, D_diff_edges,D_diff_weight,D_hx,D_nVFlag,
D_lock,D_parent,D_parent_old,N,insertEdge);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//
// gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
// check_cycle(N,H_parent);
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
//gen from flag D_nV
cudaEventRecord(start);
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//copy back
gpuErrchk( cudaMemcpy(H_nV_size,D_nV_size, sizeof(int),cudaMemcpyDeviceToHost) );
//reset nV flags
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
if(DEBUG)
printf("[INFO] starting propogation\n");
while(*H_nV_size > 0){
numBlocks = (*H_nV_size+numThreads-1)/numThreads;
//old parent to check cycle and remove locking on parent
gpuErrchk( cudaMemcpy(D_parent_old,D_parent,sizeof(int)*N,cudaMemcpyDeviceToDevice) );
//printf("[INFO] update size:%d\n",*H_nV_size);
cudaEventRecord(start);
propogate<<<numBlocks,numThreads>>>(D_nV,D_nV_size,D_offset,D_edges,D_weight,D_hx,
N,E,D_lock,D_parent,D_parent_old,D_nVFlag,
D_diff_offset,D_diff_edges,D_diff_weight,insertEdge,
D_rev_offset,D_rev_edges,D_rev_weight,
D_rev_diff_offset,D_rev_diff_edges,D_rev_diff_weight);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//reset size=0
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
//gen from flag D_nV
cudaEventRecord(start);
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//copy back
gpuErrchk( cudaMemcpy(H_nV_size,D_nV_size, sizeof(int),cudaMemcpyDeviceToHost) );
//reset nV flags
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
}
if(DEBUG)
printf("[INFO] updating priority queue\n");
//propagation complete, do normal A*
numBlocks = (K+numThreads-1)/numThreads;
//update PQ after propogate
cudaEventRecord(start);
keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//check if any node in PQ has cost less than dest
*H_flagEnd = 1;
gpuErrchk( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) );
cudaEventRecord(start);
checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,endNode,N,K);
gpuErrchk( cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) );
//flagEnd==0 here means some PQ node still has a cost lower than dest
if(*H_flagEnd==0 && flag_do_a_star){
printf("[INFO] doing a* after propogation\n");
cudaEventRecord(start);
insertDest<<<1,1>>>(D_PQ_size,endNode,D_openList);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//reset flags
*H_flagEnd = 0;
*H_flagfound = 0;
gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//re-run A* from the current priority queues
while(*H_flagEnd==0 && flag_PQ_not_empty==1){
//extract min
cudaEventRecord(start);
extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
cudaEventRecord(start);
A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent,
D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList,
N,E,K,endNode,D_nVFlag,D_PQ_size,
true,D_diff_offset,D_diff_edges,D_diff_offset,insertEdge);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
cudaEventRecord(start);
keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//gen from flag D_nV
//for N in parallel
cudaEventRecord(start);
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
cudaEventRecord(start);
insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,K,N,D_openList);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//cpy flagend and flagEmpty
gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins
if( *H_flagfound==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
cudaEventRecord(start);
checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,endNode,N,K);
gpuErrchk( cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) );
// printf("\ninside MIN\n");
}
}
}
cudaEventRecord(start);
getCx<<<1,1>>>(endNode,D_dest_cost);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
// found or not found based on Cx
gpuErrchk( cudaMemcpy(H_dest_cost,D_dest_cost, sizeof(int),cudaMemcpyDeviceToHost) );
//remove old path
Path.clear();
printf("[OUT] Cost: %d\n",*H_dest_cost);
printf("[OUT] Path(in reverse): ");
if(*H_dest_cost!=INT_MAX){
int p = endNode;
while(H_parent[p]!=-1){
printf("%d ",p);
Path.push_back(p);
p = H_parent[p];
}
Path.push_back(p);
printf("%d\n",p);
}
else{
printf("not found\n");
}
//reverse the path to get from source to end
reverse(Path.begin(),Path.end());
//merge graph
int* H_offset_new,*H_edges_new;
unsigned int* H_weight_new;
int E_new = E + insertEdge - delEdge;
H_offset_new = (int*)malloc(sizeof(int)*N);
H_edges_new = (int*)malloc(sizeof(int)*E_new);
H_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new);
mergeDiff(H_offset,H_edges,H_weight,N,E,
H_diff_offset,H_diff_edges,H_diff_weight,insertEdge,delEdge,
H_offset_new,H_edges_new,H_weight_new);
//free pointer
free(H_offset);
free(H_edges);
free(H_weight);
free(H_diff_offset);
free(H_diff_edges);
free(H_diff_weight);
H_offset = H_offset_new;
H_edges = H_edges_new;
H_weight = H_weight_new;
//cudaFree and cpy
cudaFree(D_edges);
cudaFree(D_weight);
cudaFree(D_diff_edges);
cudaFree(D_diff_offset);
cudaFree(D_diff_weight);
gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E_new) );
gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E_new) );
gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E_new,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E_new,cudaMemcpyHostToDevice) );
//merge rev graph
int* H_rev_offset_new,*H_rev_edges_new;
unsigned int* H_rev_weight_new;
H_rev_offset_new = (int*)malloc(sizeof(int)*N);
H_rev_edges_new = (int*)malloc(sizeof(int)*E_new);
H_rev_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new);
mergeDiff(H_rev_offset,H_rev_edges,H_rev_weight,N,E,
H_rev_diff_offset,H_rev_diff_edges,H_rev_diff_weight,insertEdge,delEdge,
H_rev_offset_new,H_rev_edges_new,H_rev_weight_new);
free(H_rev_offset);
free(H_rev_edges);
free(H_rev_weight);
free(H_rev_diff_offset);
free(H_rev_diff_edges);
free(H_rev_diff_weight);
H_rev_offset = H_rev_offset_new;
H_rev_edges = H_rev_edges_new;
H_rev_weight = H_rev_weight_new;
//cuda free and cpy
cudaFree(D_rev_edges);
cudaFree(D_rev_weight);
cudaFree(D_rev_diff_edges);
cudaFree(D_rev_diff_offset);
cudaFree(D_rev_diff_weight);
gpuErrchk ( cudaMalloc(&D_rev_edges,sizeof(int)*E_new) );
gpuErrchk ( cudaMalloc(&D_rev_weight,sizeof(unsigned int)*E_new) );
gpuErrchk ( cudaMemcpy(D_rev_offset,H_rev_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E_new,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_rev_weight,H_rev_weight,sizeof(unsigned int)*E_new,cudaMemcpyHostToDevice) );
//change E
E = E_new;
cudaFree(D_delEdgesV);
free(H_delEdgesV);
//inc
update_count++;
}
printf("[INFO] update count: %d\n",update_count);
printf("[INFO] RUNTIME: %f\n",run_time);
//cuda free
// free everything
}
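// insertDiff: adds edge a->b with weight c to the adjacency-list diff graph, creating Node
// objects on demand.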
void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c){
unordered_map<unsigned int,Node*>:: iterator itr;
itr = Graph.find(a);
if(itr!=Graph.end()){
Node* n = itr->second;
unordered_map<unsigned int,Node*>:: iterator it;
it = Graph.find(b);
if(it!=Graph.end()){
Node* v = it->second;
n->addEdge(v,c);
}
else{
Node* v = new Node(b);
n->addEdge(v,c);
Graph.insert(pair<unsigned int,Node*>(b,v));
}
}
else{
Node* n =new Node(a);
Graph.insert(pair<unsigned int,Node*>(a,n));
unordered_map<unsigned int,Node*>:: iterator it;
it = Graph.find(b);
if(it!=Graph.end()){
Node* v = it->second;
n->addEdge(v,c);
}
else{
Node* v = new Node(b);
n->addEdge(v,c);
Graph.insert(pair<unsigned int,Node*>(b,v));
}
}
}
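// createDiffGraph: flattens the unordered_map diff graph into CSR arrays (offset/edges/weights).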
void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph,
int* diffOff,int* diffEdges,unsigned int* diffWeight ){
int offindex = 0;
diffOff[offindex] = 0;
offindex++;
int k =0;
int weightCount = 0;
for(int i=0;i<N;i++){
unordered_map<unsigned int,Node*>:: iterator itr;
itr = Graph.find(i);
if(itr!=Graph.end()){
Node* n = itr->second;
for(int j=0;j<n->Edges.size();j++){
diffEdges[k] = n->Edges[j]->val;
k++;
}
for(int j=0;j<n->weights.size();j++){
diffWeight[weightCount] = n->weights[j];
weightCount++;
}
if(offindex < N ){
diffOff[offindex] = k;
offindex++;
}
}
else{
if(offindex < N ){
diffOff[offindex] = k;
offindex++;
}
}
}
}
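// removeDelEdges: marks edge u->v as deleted (-1) in both the forward and reverse CSR and
// counts the deletion only if the edge was found in both.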
void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int* rev_offset,int* rev_edges,int& del_size){
int start = offset[u];
int end = E;
bool flag_done = false;
bool flag_done_rev = false;
if(u!=N-1)
end = offset[u+1];
while(start<end){
if( v == edges[start]){
edges[start]=-1;
flag_done = true;
break;
}
start++;
}
start = rev_offset[v];
end = E;
if(v!=N-1)
end = rev_offset[v+1];
while(start < end){
if(u == rev_edges[start]){
rev_edges[start] = -1;
flag_done_rev = true;
break;
}
start++;
}
if(flag_done && flag_done_rev)
del_size++;
if( (flag_done && !flag_done_rev)|| (!flag_done && flag_done_rev) )
printf("[ERROR] edge present in front ot back graph\n");
}
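// check_del_path: sets flag when the deleted edge u->v lies on the current shortest path.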
void check_del_path(int u, int v,vector<int> Path, bool& flag){
vector<int> :: iterator itr;
itr = find(Path.begin(),Path.end(),u);
//guard: u may be the last node on the path, so never dereference past the end
if(itr!=Path.end() && (itr+1)!=Path.end()){
if(*(itr+1) == v)
flag = true;
}
}
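// check_cycle: debug helper that walks parent pointers from every node to detect cycles.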
void check_cycle(int N,int* parent){
int flag = 0;
for(int i=0;i<N;i++){
vector<int> visited(N,0);
int ancestor = parent[i];
while(ancestor > 0){
if(visited[ancestor]==1){
printf("cycle at: %d, %d\n",i,ancestor);
flag =1;
break;
}
visited[ancestor]=1;
ancestor = parent[ancestor];
}
}
if(flag==0)
printf("no cycle\n");
}
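// mergeDiff: merges the base CSR and the diff CSR into new arrays, skipping edges marked -1.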
void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int& E,
int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size,
int* mOffset,int* mEdges,unsigned int* mWeight){
mOffset[0] = 0;
int edegOffset= 0;
for(int i=0;i<N;i++){
int start = offset[i];
int end = E;
if(i!=N-1)
end = offset[i+1];
int count = 0;
while(start<end){
int child = edges[start];
if(child!=-1){
mEdges[edegOffset+count] = child;
mWeight[edegOffset+count] = weight[start];
count++;
}
start++;
}
start = diff_offset[i];
end = insert_size;
if(i!=N-1)
end = diff_offset[i+1];
while(start<end){
int child = diff_edges[start];
if(child!=-1){
mEdges[edegOffset+count] = child;
mWeight[edegOffset+count]= diff_weight[start];
count++;
}
start++;
}
edegOffset+=count;
if(i!=N-1)
mOffset[i+1]=edegOffset;
}
}
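// computeTime: accumulates the elapsed milliseconds between the two events into time.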
void computeTime(float& time,cudaEvent_t start, cudaEvent_t stop){
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
time+= milliseconds;
//printf("[INFO] run time: %f, %f\n",time,milliseconds);
} |
7d430271b59c47613aae9c147d56d1c433ac3488.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
/*
#define N 512
__global__ void dotProd( int *a, int *b, int *c ) {
__shared__ int temp[N];
temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads(); // Avoid a race condition.
if( 0 == threadIdx.x ) {
int sum = 0;
for(int i = 0; i < N; i++ ) {
sum += temp[i]; //slow
}
*c = sum;
}
}
#define N 2048
#define THREADS_PER_BLOCK 512
__global__ void dotProd( int *a, int *b, int *c ) {
__shared__ int temp[THREADS_PER_BLOCK];
int index = threadIdx.x + blockIdx.x * blockDim.x;
temp[threadIdx.x] = a[index] * b[index];
__syncthreads(); // Cannot continue until every thread has filled its temp slot...
if(threadIdx.x == 0) {
int sum = 0;
for( int i= 0; i < THREADS_PER_BLOCK; i++ ) {
sum += temp[i];
}
c[blockIdx.x] = sum;
}
}
*/
const int THREADS_PER_BLOCK = 32;
const int N = 2048;
__global__ void mult(int *a, int *b, int *c)
{
int pos = threadIdx.x + blockDim.x * blockIdx.x;
if (pos >= N) return;
c[pos] = a[pos] * b[pos];
}
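// shared_mult: same element-wise product, but each block stages its products in shared memory
// before writing them back; __syncthreads() keeps the tile consistent.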
__global__ void shared_mult(int *a, int *b, int *c)
{
__shared__ int mem[THREADS_PER_BLOCK];
int pos = threadIdx.x + blockIdx.x * blockDim.x;
mem[threadIdx.x] = a[pos] * b[pos];
__syncthreads();
c[pos] = mem[threadIdx.x];
}
int main(int argc, char const *argv[]) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size = sizeof(int) * N;
a = (int *) malloc(size);
b = (int *) malloc(size);
c = (int *) malloc(size);
for (int i = 0; i < N ; i++) {
a[i] = b[i] = 3;
}
hipMalloc(&dev_a, size);
hipMalloc(&dev_b, size);
hipMalloc(&dev_c, size);
hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
hipMemset(dev_c, 0, size);
hipLaunchKernelGGL(( shared_mult), dim3((N - 1) / THREADS_PER_BLOCK + 1), dim3(THREADS_PER_BLOCK), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
for (int i = 0; i < N ; i++) {
fprintf(stdout, "Numb : %d\n", c[i]);
}
hipFree(dev_a); hipFree(dev_b); hipFree(dev_c);
return 0;
}
| 7d430271b59c47613aae9c147d56d1c433ac3488.cu | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
/*
#define N 512
__global__ void dotProd( int *a, int *b, int *c ) {
__shared__ int temp[N];
temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads(); // Avoid a race condition.
if( 0 == threadIdx.x ) {
int sum = 0;
for(int i = 0; i < N; i++ ) {
sum += temp[i]; //slow
}
*c = sum;
}
}
#define N 2048
#define THREADS_PER_BLOCK 512
__global__ void dotProd( int *a, int *b, int *c ) {
__shared__ int temp[THREADS_PER_BLOCK];
int index = threadIdx.x + blockIdx.x * blockDim.x;
temp[threadIdx.x] = a[index] * b[index];
__syncthreads(); // Cannot continue until every thread has filled its temp slot...
if(threadIdx.x == 0) {
int sum = 0;
for( int i= 0; i < THREADS_PER_BLOCK; i++ ) {
sum += temp[i];
}
c[blockIdx.x] = sum;
}
}
*/
const int THREADS_PER_BLOCK = 32;
const int N = 2048;
__global__ void mult(int *a, int *b, int *c)
{
int pos = threadIdx.x + blockDim.x * blockIdx.x;
if (pos >= N) return;
c[pos] = a[pos] * b[pos];
}
__global__ void shared_mult(int *a, int *b, int *c)
{
__shared__ int mem[THREADS_PER_BLOCK];
int pos = threadIdx.x + blockIdx.x * blockDim.x;
mem[threadIdx.x] = a[pos] * b[pos];
__syncthreads();
c[pos] = mem[threadIdx.x];
}
int main(int argc, char const *argv[]) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size = sizeof(int) * N;
a = (int *) malloc(size);
b = (int *) malloc(size);
c = (int *) malloc(size);
for (int i = 0; i < N ; i++) {
a[i] = b[i] = 3;
}
cudaMalloc(&dev_a, size);
cudaMalloc(&dev_b, size);
cudaMalloc(&dev_c, size);
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
cudaMemset(dev_c, 0, size);
shared_mult<<<(N - 1) / THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < N ; i++) {
fprintf(stdout, "Numb : %d\n", c[i]);
}
cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
return 0;
}
|
32989707a5a7fc1d7874bef8fc7eae3b3ed5c3cb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
* application. On the other side, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
//const int DATA_N = 1048576 * 32;
const int DATA_N = 2097152 * 32;
////////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA Sample describing
// reduction optimization strategies
////////////////////////////////////////////////////////////////////////////////
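// Each thread accumulates a grid-stride partial sum; the ACCUM_N partial sums are reduced on the host.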
__global__ static void reduceKernel(float *d_Result, float *d_Input, int N)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for (int pos = tid; pos < N; pos += threadN)
sum += d_Input[pos];
d_Result[tid] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
//Solver config
TGPUplan plan[MAX_GPU_COUNT];
//GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
printf("Starting simpleMultiGPU\n");
checkCudaErrors(hipGetDeviceCount(&GPU_N));
if (GPU_N > MAX_GPU_COUNT)
{
GPU_N = MAX_GPU_COUNT;
}
printf("CUDA-capable device count: %i\n", GPU_N);
printf("Generating input data...\n\n");
//Subdividing input data across GPUs
//Get data sizes for each GPU
for (i = 0; i < GPU_N; i++)
{
plan[i].dataN = DATA_N / GPU_N;
}
//Take into account "odd" data sizes
for (i = 0; i < DATA_N % GPU_N; i++)
{
plan[i].dataN++;
}
//Assign data ranges to GPUs
gpuBase = 0;
for (i = 0; i < GPU_N; i++)
{
plan[i].h_Sum = h_SumGPU + i;
gpuBase += plan[i].dataN;
}
//Create streams for issuing GPU command asynchronously and allocate memory (GPU and System page-locked)
for (i = 0; i < GPU_N; i++)
{
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipStreamCreate(&plan[i].stream));
//Allocate memory
checkCudaErrors(hipMalloc((void **)&plan[i].d_Data, plan[i].dataN * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&plan[i].d_Sum, ACCUM_N * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&plan[i].h_Sum_from_device, ACCUM_N * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&plan[i].h_Data, plan[i].dataN * sizeof(float)));
for (j = 0; j < plan[i].dataN; j++)
{
plan[i].h_Data[j] = (float)rand() / (float)RAND_MAX;
}
}
//Start timing and compute on GPU(s)
printf("Computing with %d GPUs...\n", GPU_N);
// create and start timer
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
// start the timer
sdkStartTimer(&timer);
//Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++)
{
//Set device
checkCudaErrors(hipSetDevice(i));
//Copy input data from CPU
checkCudaErrors(hipMemcpyAsync(plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof(float), hipMemcpyHostToDevice, plan[i].stream));
//Perform GPU computations
hipLaunchKernelGGL(( reduceKernel), dim3(BLOCK_N), dim3(THREAD_N), 0, plan[i].stream, plan[i].d_Sum, plan[i].d_Data, plan[i].dataN);
getLastCudaError("reduceKernel() execution failed.\n");
//Read back GPU results
checkCudaErrors(hipMemcpyAsync(plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N *sizeof(float), hipMemcpyDeviceToHost, plan[i].stream));
}
//Process GPU results
for (i = 0; i < GPU_N; i++)
{
float sum;
//Set device
checkCudaErrors(hipSetDevice(i));
//Wait for all operations to finish
hipStreamSynchronize(plan[i].stream);
//Finalize GPU reduction for current subvector
sum = 0;
for (j = 0; j < ACCUM_N; j++)
{
sum += plan[i].h_Sum_from_device[j];
}
*(plan[i].h_Sum) = (float)sum;
//Shut down this GPU
checkCudaErrors(hipHostFree(plan[i].h_Sum_from_device));
checkCudaErrors(hipFree(plan[i].d_Sum));
checkCudaErrors(hipFree(plan[i].d_Data));
checkCudaErrors(hipStreamDestroy(plan[i].stream));
}
sumGPU = 0;
for (i = 0; i < GPU_N; i++)
{
sumGPU += h_SumGPU[i];
}
sdkStopTimer(&timer);
printf(" GPU Processing time: %f (ms)\n\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
// Compute on Host CPU
printf("Computing with Host CPU...\n\n");
sumCPU = 0;
for (i = 0; i < GPU_N; i++)
{
for (j = 0; j < plan[i].dataN; j++)
{
sumCPU += plan[i].h_Data[j];
}
}
// Compare GPU and CPU results
printf("Comparing GPU and Host CPU results...\n");
diff = fabs(sumCPU - sumGPU) / fabs(sumCPU);
printf(" GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU);
printf(" Relative difference: %E \n\n", diff);
// Cleanup and shutdown
for (i = 0; i < GPU_N; i++)
{
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipHostFree(plan[i].h_Data));
}
exit((diff < 1e-5) ? EXIT_SUCCESS : EXIT_FAILURE);
}
| 32989707a5a7fc1d7874bef8fc7eae3b3ed5c3cb.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to work with multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
* application. On the other hand, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
//const int DATA_N = 1048576 * 32;
const int DATA_N = 2097152 * 32;
////////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA Sample describing
// reduction optimization strategies
////////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel(float *d_Result, float *d_Input, int N)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for (int pos = tid; pos < N; pos += threadN)
sum += d_Input[pos];
d_Result[tid] = sum;
}
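////////////////////////////////////////////////////////////////////////////////
// Editor's sketch (not part of the original sample): reduceKernel above leaves
// BLOCK_N * THREAD_N partial sums for the host to add up. As one illustration
// of the "reduction optimization strategies" mentioned in the comment above,
// the hypothetical variant below also folds each block's partial sums in
// shared memory, so only gridDim.x values remain for the host. It assumes a
// power-of-two blockDim.x and a launch with blockDim.x * sizeof(float) bytes
// of dynamic shared memory.
////////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernelShared(float *d_Result, float *d_Input, int N)
{
    extern __shared__ float s_partial[];
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int threadN = gridDim.x * blockDim.x;
    float sum = 0;
    // Grid-stride accumulation, identical to reduceKernel above
    for (int pos = tid; pos < N; pos += threadN)
        sum += d_Input[pos];
    s_partial[threadIdx.x] = sum;
    __syncthreads();
    // Tree reduction in shared memory: halve the number of active threads each step
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (threadIdx.x < stride)
            s_partial[threadIdx.x] += s_partial[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        d_Result[blockIdx.x] = s_partial[0];
}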
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
//Solver config
TGPUplan plan[MAX_GPU_COUNT];
//GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
printf("Starting simpleMultiGPU\n");
checkCudaErrors(cudaGetDeviceCount(&GPU_N));
if (GPU_N > MAX_GPU_COUNT)
{
GPU_N = MAX_GPU_COUNT;
}
printf("CUDA-capable device count: %i\n", GPU_N);
printf("Generating input data...\n\n");
//Subdividing input data across GPUs
//Get data sizes for each GPU
for (i = 0; i < GPU_N; i++)
{
plan[i].dataN = DATA_N / GPU_N;
}
//Take into account "odd" data sizes
for (i = 0; i < DATA_N % GPU_N; i++)
{
plan[i].dataN++;
}
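    // Editor's illustration with hypothetical numbers (the sample's DATA_N is
    // much larger): for DATA_N = 10 and GPU_N = 3 the division above assigns
    // 3 elements to every GPU, and the DATA_N % GPU_N = 1 leftover element
    // bumps plan[0].dataN to 4, giving the split {4, 3, 3}, which sums back
    // to DATA_N.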
//Assign data ranges to GPUs
gpuBase = 0;
for (i = 0; i < GPU_N; i++)
{
plan[i].h_Sum = h_SumGPU + i;
gpuBase += plan[i].dataN;
}
//Create streams for issuing GPU commands asynchronously and allocate memory (GPU and System page-locked)
for (i = 0; i < GPU_N; i++)
{
checkCudaErrors(cudaSetDevice(i));
checkCudaErrors(cudaStreamCreate(&plan[i].stream));
//Allocate memory
checkCudaErrors(cudaMalloc((void **)&plan[i].d_Data, plan[i].dataN * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&plan[i].d_Sum, ACCUM_N * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&plan[i].h_Sum_from_device, ACCUM_N * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&plan[i].h_Data, plan[i].dataN * sizeof(float)));
for (j = 0; j < plan[i].dataN; j++)
{
plan[i].h_Data[j] = (float)rand() / (float)RAND_MAX;
}
}
//Start timing and compute on GPU(s)
printf("Computing with %d GPUs...\n", GPU_N);
// create and start timer
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
// start the timer
sdkStartTimer(&timer);
//Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++)
{
//Set device
checkCudaErrors(cudaSetDevice(i));
//Copy input data from CPU
checkCudaErrors(cudaMemcpyAsync(plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof(float), cudaMemcpyHostToDevice, plan[i].stream));
//Perform GPU computations
reduceKernel<<<BLOCK_N, THREAD_N, 0, plan[i].stream>>>(plan[i].d_Sum, plan[i].d_Data, plan[i].dataN);
getLastCudaError("reduceKernel() execution failed.\n");
//Read back GPU results
checkCudaErrors(cudaMemcpyAsync(plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N *sizeof(float), cudaMemcpyDeviceToHost, plan[i].stream));
}
//Process GPU results
for (i = 0; i < GPU_N; i++)
{
float sum;
//Set device
checkCudaErrors(cudaSetDevice(i));
//Wait for all operations to finish
cudaStreamSynchronize(plan[i].stream);
//Finalize GPU reduction for current subvector
sum = 0;
for (j = 0; j < ACCUM_N; j++)
{
sum += plan[i].h_Sum_from_device[j];
}
*(plan[i].h_Sum) = (float)sum;
//Shut down this GPU
checkCudaErrors(cudaFreeHost(plan[i].h_Sum_from_device));
checkCudaErrors(cudaFree(plan[i].d_Sum));
checkCudaErrors(cudaFree(plan[i].d_Data));
checkCudaErrors(cudaStreamDestroy(plan[i].stream));
}
sumGPU = 0;
for (i = 0; i < GPU_N; i++)
{
sumGPU += h_SumGPU[i];
}
sdkStopTimer(&timer);
printf(" GPU Processing time: %f (ms)\n\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
// Compute on Host CPU
printf("Computing with Host CPU...\n\n");
sumCPU = 0;
for (i = 0; i < GPU_N; i++)
{
for (j = 0; j < plan[i].dataN; j++)
{
sumCPU += plan[i].h_Data[j];
}
}
// Compare GPU and CPU results
printf("Comparing GPU and Host CPU results...\n");
diff = fabs(sumCPU - sumGPU) / fabs(sumCPU);
printf(" GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU);
printf(" Relative difference: %E \n\n", diff);
// Cleanup and shutdown
for (i = 0; i < GPU_N; i++)
{
checkCudaErrors(cudaSetDevice(i));
checkCudaErrors(cudaFreeHost(plan[i].h_Data));
}
exit((diff < 1e-5) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
58dea341d68e52bd228509d316cf13c8930f121f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h> // for malloc/free used below
#define NUM_ELEMENTS 7
#define MAX_ELEMENTS_BLOCK 2048
struct Point
{
unsigned int X;
unsigned int Y;
unsigned int leftID; // counter-clockwise neighbor
unsigned int rightID; // clockwise neighbor
};
extern __shared__ Point hullData[];
hipError_t convexHull(Point* h_data, int numPoints);
Point* h_data;
Point* d_data;
__device__ void findHull(int &currA, int &currB)
{
int result;
//int startIndex;
int currAorig = currA;
int currBorig = currB;
Point c;
bool isEven = (threadIdx.x % 2) == 0;
if (isEven)
{
c = hullData[hullData[currA].leftID];
}
else
{
c = hullData[hullData[currA].rightID];
}
bool hullFound = false;
while (!hullFound)
{
result = ((hullData[currB].X - hullData[currA].X)*(c.Y - hullData[currA].Y) - (hullData[currB].Y - hullData[currA].Y)*(c.X - hullData[currA].X));
/*if (i == 1 && (idx == 49 || idx == 48))
printf("idx: %d a: %d b: %d\n", idx, currA, currB);*/
if (isEven)
{
if (result >= 0 && hullData[currA].leftID != currAorig)
{
currA = hullData[currA].leftID;
c = hullData[hullData[currA].leftID];
}
else
{
c = hullData[hullData[currB].rightID];
//result = ((b.X - a.X)*(c.Y - a.Y) - (b.Y - a.Y)*(c.X - a.X));
result = ((hullData[currB].X - hullData[currA].X)*(c.Y - hullData[currA].Y) - (hullData[currB].Y - hullData[currA].Y)*(c.X - hullData[currA].X));
if (result >= 0 && hullData[currB].rightID != currBorig)
{
currB = hullData[currB].rightID;
c = hullData[hullData[currA].leftID];
}
else
{
hullFound = true;
}
}
}
else
{
if (result <= 0 && hullData[currA].rightID != currAorig)
{
currA = hullData[currA].rightID;
c = hullData[hullData[currA].rightID];
}
else
{
c = hullData[hullData[currB].leftID];
result = ((hullData[currB].X - hullData[currA].X)*(c.Y - hullData[currA].Y) - (hullData[currB].Y - hullData[currA].Y)*(c.X - hullData[currA].X));
if (result <= 0 && hullData[currB].leftID != currBorig)
{
currB = hullData[currB].leftID;
c = hullData[hullData[currA].rightID];
}
else
{
hullFound = true;
}
}
}
}
}
__device__ void findHull(int &currA, int &currB, Point* data)
{
int result;
//int startIndex;
int currAorig = currA;
int currBorig = currB;
Point c;
bool isEven = (threadIdx.x % 2) == 0;
if (isEven)
{
c = data[data[currA].leftID];
}
else
{
c = data[data[currA].rightID];
}
//if (threadIdx.x == 0)
// printf("thread: %d\n currA: %d ( %d, %d ) currB: %d ( %d, %d ) c: ( %d, %d )\n", threadIdx.x, currA, data[currA].X, data[currA].Y, currB, data[currB].X, data[currB].Y, c.X, c.Y);
/*if (threadIdx.x == 0 && blockIdx.x == 0)
{
int j = 0;
int stop = j;
int count = 0;
//printf("%d\n", h_data[0].rightID);
for (int i=0; i<50; i++)
{
if (j == stop && i != 0)
break;
printf("i: %d ( %d, %d )\nr: %d l: %d\n\n", i, data[j].X, data[j].Y, data[j].rightID, data[j].leftID);
j = data[j].rightID;
count++;
}
printf("\nCount: %d\n", count);
}*/
bool hullFound = false;
while (!hullFound)
{
result = ((data[currB].X - data[currA].X)*(c.Y - data[currA].Y) - (data[currB].Y - data[currA].Y)*(c.X - data[currA].X));
/*if (i == 1 && (idx == 49 || idx == 48))
printf("idx: %d a: %d b: %d\n", idx, currA, currB);*/
if (isEven)
{
if (result >= 0 && data[currA].leftID != currAorig)
{
currA = data[currA].leftID;
c = data[data[currA].leftID];
}
else
{
c = data[data[currB].rightID];
//result = ((b.X - a.X)*(c.Y - a.Y) - (b.Y - a.Y)*(c.X - a.X));
result = ((data[currB].X - data[currA].X)*(c.Y - data[currA].Y) - (data[currB].Y - data[currA].Y)*(c.X - data[currA].X));
if (result >= 0 && data[currB].rightID != currBorig)
{
currB = data[currB].rightID;
c = data[data[currA].leftID];
}
else
{
hullFound = true;
}
}
}
else
{
if (result <= 0 && data[currA].rightID != currAorig)
{
currA = data[currA].rightID;
c = data[data[currA].rightID];
}
else
{
c = data[data[currB].leftID];
result = ((data[currB].X - data[currA].X)*(c.Y - data[currA].Y) - (data[currB].Y - data[currA].Y)*(c.X - data[currA].X));
if (result <= 0 && data[currB].leftID != currBorig)
{
currB = data[currB].leftID;
c = data[data[currA].rightID];
}
else
{
hullFound = true;
}
}
}
}
}
__global__ void divideAndConquer(Point* data, int numElements)
{
int idx = threadIdx.x;
int bidx = blockIdx.x;
int numElementsPBlock = blockDim.x * 2;
int numThreads = blockDim.x;
//int numBlocks = gridDim.x;
bool isEven = (idx % 2) == 0;
/*
if (idx == 0)
{
printf("%d\n", idx);
printf("%d\n", bidx);
printf("%d\n", numElementsPBlock);
printf("%d\n", numElements);
printf("%d\n", numThreads);
printf("%d\n", numBlocks);
}*/
hullData[idx] = data[idx + (numElementsPBlock * bidx)];
if ((idx + (numElementsPBlock * bidx)) + numThreads < numElements)
hullData[idx + numThreads] = data[(idx + (numElementsPBlock * bidx)) + numThreads];
/*hullData[idx + (2 * blockDim.x)] = data[idx + (2 * blockDim.x)];
hullData[idx + (3 * blockDim.x)] = data[idx + (3 * blockDim.x)];*/
__syncthreads();
if ((idx << 1) + 1 < numElementsPBlock)
{
hullData[(idx << 1)].leftID = (idx << 1) + 1;
hullData[(idx << 1)].rightID = (idx << 1) + 1;
hullData[(idx << 1) + 1].leftID = (idx << 1);
hullData[(idx << 1) + 1].rightID = (idx << 1);
}
else
{
hullData[(idx << 1)].leftID = (idx << 1);
hullData[(idx << 1)].rightID = (idx << 1);
}
//printf("thread: %d\n (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n", idx, hullData[(idx << 1)].X, hullData[(idx << 1)].Y, hullData[(idx << 1)].rightID, hullData[(idx << 1)].leftID, hullData[(idx << 1) + 1].X, hullData[(idx << 1) + 1].Y, hullData[(idx << 1) + 1].rightID, hullData[(idx << 1) + 1].leftID);
/*int currA = startIndex + 1;
int currB = startIndex + 2;
Point c = hullData[hullData[currA].leftID];
if (!isEven)
{
c = hullData[hullData[currA].rightID];
}*/
//int startIndex;
int currA;
int currB;
__syncthreads();
for (int i = 1; i < ((numElementsPBlock + 1) / 2); i *= 2)
{
int index = 4 * i * (idx / 2);
/*if (idx == 0)
printf("-------------------- i = %d --------------------\n", i);
__syncthreads();
if (i == 2 && (idx == 49 || idx == 48))
printf("thread: %d\n %d: (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n %d: (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n", idx, (idx << 1), hullData[(idx << 1)].X, hullData[(idx << 1)].Y, hullData[(idx << 1)].rightID, hullData[(idx << 1)].leftID, ((idx << 1) + 1), hullData[(idx << 1) + 1].X, hullData[(idx << 1) + 1].Y, hullData[(idx << 1) + 1].rightID, hullData[(idx << 1) + 1].leftID);*/
if (index + (i << 1) < numElementsPBlock)
{
currA = index + (i << 1) - 1;
currB = index + (i << 1);
findHull(currA, currB);
}
__syncthreads();
if (index + (i << 1) < numElementsPBlock)
{
if (isEven)
{
hullData[currA].rightID = currB;
hullData[currB].leftID = currA;
}
else
{
hullData[currA].leftID = currB;
hullData[currB].rightID = currA;
}
}
//__syncthreads();
// if (isEven)
// {
// int j = 0;
// int count = 0;
// //printf("%d\n", h_data[0].rightID);
// for (int i=0; i<numElements; i++)
// {
// if (j != 0 || i == 0)
// printf("id: %d %d, %d\n", idx, hullData[j].X, hullData[j].Y);
// else
// break;
// j = hullData[j].rightID;
// count++;
// //system("PAUSE");
//
// }
// __syncthreads();
// printf("\nCount: %d\n", count);
// }
}
__syncthreads();
hullData[idx].rightID = (hullData[idx].rightID + (numElementsPBlock * blockIdx.x));
hullData[idx].leftID = (hullData[idx].leftID + (numElementsPBlock * blockIdx.x));
if (idx + numThreads < numElementsPBlock)
{
hullData[idx + numThreads].rightID = (hullData[idx + numThreads].rightID + (numElementsPBlock * blockIdx.x));
hullData[idx + numThreads].leftID = (hullData[idx + numThreads].leftID + (numElementsPBlock * blockIdx.x));
}
__syncthreads();
data[idx + (numElementsPBlock * bidx)] = hullData[idx];
if ((idx + (numElementsPBlock * bidx)) + numThreads < numElements)
data[(idx + (numElementsPBlock * bidx)) + numThreads] = hullData[idx + numThreads];
__syncthreads();
}
__global__ void divideAndConquerBlocks(Point* data, int numElements, int iteration)
{
int idx = threadIdx.x;
//int bidx = blockIdx.x;
bool isEven = (idx % 2) == 0;
int currA;
int currB;
//currA = ((((MAX_ELEMENTS_BLOCK * 2) * ((idx / 2) + 1)) + (MAX_ELEMENTS_BLOCK * (MAX_ELEMENTS_BLOCK / 4) * bidx)) * iteration) - 1;
//currB = ((((MAX_ELEMENTS_BLOCK * 2) * ((idx / 2) + 1)) + (MAX_ELEMENTS_BLOCK * (MAX_ELEMENTS_BLOCK / 4) * bidx)) * iteration);
currA = (((MAX_ELEMENTS_BLOCK * 2 * (idx / 2)) + MAX_ELEMENTS_BLOCK) * iteration) - 1;
currB = (((MAX_ELEMENTS_BLOCK * 2 * (idx / 2)) + MAX_ELEMENTS_BLOCK) * iteration);
//printf("Id: %d Before FindHull--- currA: %d currB: %d\n", idx, currA, currB);
findHull(currA, currB, data);
//printf("Id: %d After FindHull--- currA: %d currB: %d\n", idx, currA, currB);
__syncthreads();
if (isEven)
{
data[currA].rightID = currB;
data[currB].leftID = currA;
}
else
{
data[currA].leftID = currB;
data[currB].rightID = currA;
}
__syncthreads();
}
int main(int argc, char** argv)
{
FILE* input;
if (argc > 1)
{
input = fopen(argv[1], "r");
}
else
{
input = fopen("sorted_8192.txt", "r");
}
//get number of points
int numPoints;
fscanf(input, "%d", &numPoints);
fscanf(input, "%d", &numPoints);
//printf("%d\n", numPoints);
//system("PAUSE");
h_data = (Point*)malloc(sizeof(Point) * numPoints);
//initialize input
for (int i = 0; i < numPoints; i++){
fscanf(input, "%u %u", &h_data[i].X, &h_data[i].Y); // X and Y are unsigned, so read with %u
}
hipError_t cudaStatus = convexHull(h_data, numPoints);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "convexHull failed!");
//system("PAUSE");
return 1;
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
//system("PAUSE");
return 1;
}
free(h_data);
//system("PAUSE");
return 0;
}
// Helper function that runs the parallel divide-and-conquer convex hull on the GPU.
hipError_t convexHull(Point* h_data, int numPoints)
{
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
cudaStatus = hipMalloc((void**)&d_data, numPoints * sizeof(Point));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(d_data, h_data, numPoints * sizeof(Point), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
//printf("\n\nNum Threads to be launched: %d\n\n", numThreads);
int numBlocks = 1;
if ((numPoints % MAX_ELEMENTS_BLOCK) == 0)
numBlocks = (numPoints / MAX_ELEMENTS_BLOCK);
else
numBlocks = ((numPoints / MAX_ELEMENTS_BLOCK) + 1);
int numThreads = 1;
if (numBlocks > 1)
numThreads = (MAX_ELEMENTS_BLOCK / 2);
else
numThreads = ((numPoints + 1) / 2);
printf("\n----------Starting first DnC---------\nnumBlocks: %d numThreads: %d\n\n", numBlocks, numThreads);
hipLaunchKernelGGL(( divideAndConquer), dim3(numBlocks), dim3(numThreads), sizeof(Point) * (numThreads * 2), 0, d_data, numPoints);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "divideAndConquer launch failed: %s\n", hipGetErrorString(cudaStatus));
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching divideAndConquer!\n", cudaStatus);
}
int j = 1;
for (int i = 1; i < numBlocks; i *= 2)
{
int newNumBlocks = (numBlocks / i) / (MAX_ELEMENTS_BLOCK / 4);
if (newNumBlocks == 0)
newNumBlocks++;
int newNumThreads = 1;
int num = 1024;
if (newNumBlocks > 1)
newNumThreads = (MAX_ELEMENTS_BLOCK / 2);
else
{
if (numBlocks > 1024)
{
newNumThreads = (num / j);
}
else
{
newNumThreads = (numBlocks / j);
}
j *= 2;
}
printf("\n----------Starting second DnC---------\nnewNumBlocks: %d newNumThreads: %d\n\n", newNumBlocks, newNumThreads);
hipLaunchKernelGGL(( divideAndConquerBlocks), dim3(newNumBlocks), dim3(newNumThreads), 0, 0, d_data, numPoints, i);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "divideAndConquerBlocks launch failed: %s\n", hipGetErrorString(cudaStatus));
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching divideAndConquerBlocks!\n", cudaStatus);
}
}
cudaStatus = hipMemcpy(h_data, d_data, numPoints * sizeof(Point), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
}
hipFree(d_data);
j = 0;
int count = 0;
//printf("%d\n", h_data[0].rightID);
for (int i=0; i<numPoints; i++)
{
if (j == 0 && i != 0)
break;
printf("j: %d ( %d, %d )\nr: %d l: %d\n\n", j, h_data[j].X, h_data[j].Y, h_data[j].rightID, h_data[j].leftID);
j = h_data[j].rightID;
count++;
//system("PAUSE");
}
printf("\nCount: %d\n", count);
/*for (int i = 0; i < numPoints; i++)
{
printf("%d, %d\n", h_data[i].X, h_data[i].Y);
system("PAUSE");
}*/
return cudaStatus;
}
| 58dea341d68e52bd228509d316cf13c8930f121f.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h> // for malloc/free used below
#define NUM_ELEMENTS 7
#define MAX_ELEMENTS_BLOCK 2048
struct Point
{
unsigned int X;
unsigned int Y;
unsigned int leftID; // counter-clockwise neighbor
unsigned int rightID; // clockwise neighbor
};
extern __shared__ Point hullData[];
cudaError_t convexHull(Point* h_data, int numPoints);
Point* h_data;
Point* d_data;
__device__ void findHull(int &currA, int &currB)
{
int result;
//int startIndex;
int currAorig = currA;
int currBorig = currB;
Point c;
bool isEven = (threadIdx.x % 2) == 0;
if (isEven)
{
c = hullData[hullData[currA].leftID];
}
else
{
c = hullData[hullData[currA].rightID];
}
bool hullFound = false;
while (!hullFound)
{
result = ((hullData[currB].X - hullData[currA].X)*(c.Y - hullData[currA].Y) - (hullData[currB].Y - hullData[currA].Y)*(c.X - hullData[currA].X));
/*if (i == 1 && (idx == 49 || idx == 48))
printf("idx: %d a: %d b: %d\n", idx, currA, currB);*/
if (isEven)
{
if (result >= 0 && hullData[currA].leftID != currAorig)
{
currA = hullData[currA].leftID;
c = hullData[hullData[currA].leftID];
}
else
{
c = hullData[hullData[currB].rightID];
//result = ((b.X - a.X)*(c.Y - a.Y) - (b.Y - a.Y)*(c.X - a.X));
result = ((hullData[currB].X - hullData[currA].X)*(c.Y - hullData[currA].Y) - (hullData[currB].Y - hullData[currA].Y)*(c.X - hullData[currA].X));
if (result >= 0 && hullData[currB].rightID != currBorig)
{
currB = hullData[currB].rightID;
c = hullData[hullData[currA].leftID];
}
else
{
hullFound = true;
}
}
}
else
{
if (result <= 0 && hullData[currA].rightID != currAorig)
{
currA = hullData[currA].rightID;
c = hullData[hullData[currA].rightID];
}
else
{
c = hullData[hullData[currB].leftID];
result = ((hullData[currB].X - hullData[currA].X)*(c.Y - hullData[currA].Y) - (hullData[currB].Y - hullData[currA].Y)*(c.X - hullData[currA].X));
if (result <= 0 && hullData[currB].leftID != currBorig)
{
currB = hullData[currB].leftID;
c = hullData[hullData[currA].rightID];
}
else
{
hullFound = true;
}
}
}
}
}
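// Editor's sketch: the "result" expression used throughout findHull is the
// z-component of the 2D cross product (B - A) x (C - A), i.e. an orientation
// test -- positive means C lies to the left of the directed line A->B,
// negative means to the right, zero means collinear. The equivalent helper
// below (orient2d is a name introduced here for illustration only; it is not
// called by the kernels) casts to int first because Point stores unsigned
// coordinates.
__device__ static int orient2d(const Point &a, const Point &b, const Point &c)
{
    return ((int)b.X - (int)a.X) * ((int)c.Y - (int)a.Y)
         - ((int)b.Y - (int)a.Y) * ((int)c.X - (int)a.X);
}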
__device__ void findHull(int &currA, int &currB, Point* data)
{
int result;
//int startIndex;
int currAorig = currA;
int currBorig = currB;
Point c;
bool isEven = (threadIdx.x % 2) == 0;
if (isEven)
{
c = data[data[currA].leftID];
}
else
{
c = data[data[currA].rightID];
}
//if (threadIdx.x == 0)
// printf("thread: %d\n currA: %d ( %d, %d ) currB: %d ( %d, %d ) c: ( %d, %d )\n", threadIdx.x, currA, data[currA].X, data[currA].Y, currB, data[currB].X, data[currB].Y, c.X, c.Y);
/*if (threadIdx.x == 0 && blockIdx.x == 0)
{
int j = 0;
int stop = j;
int count = 0;
//printf("%d\n", h_data[0].rightID);
for (int i=0; i<50; i++)
{
if (j == stop && i != 0)
break;
printf("i: %d ( %d, %d )\nr: %d l: %d\n\n", i, data[j].X, data[j].Y, data[j].rightID, data[j].leftID);
j = data[j].rightID;
count++;
}
printf("\nCount: %d\n", count);
}*/
bool hullFound = false;
while (!hullFound)
{
result = ((data[currB].X - data[currA].X)*(c.Y - data[currA].Y) - (data[currB].Y - data[currA].Y)*(c.X - data[currA].X));
/*if (i == 1 && (idx == 49 || idx == 48))
printf("idx: %d a: %d b: %d\n", idx, currA, currB);*/
if (isEven)
{
if (result >= 0 && data[currA].leftID != currAorig)
{
currA = data[currA].leftID;
c = data[data[currA].leftID];
}
else
{
c = data[data[currB].rightID];
//result = ((b.X - a.X)*(c.Y - a.Y) - (b.Y - a.Y)*(c.X - a.X));
result = ((data[currB].X - data[currA].X)*(c.Y - data[currA].Y) - (data[currB].Y - data[currA].Y)*(c.X - data[currA].X));
if (result >= 0 && data[currB].rightID != currBorig)
{
currB = data[currB].rightID;
c = data[data[currA].leftID];
}
else
{
hullFound = true;
}
}
}
else
{
if (result <= 0 && data[currA].rightID != currAorig)
{
currA = data[currA].rightID;
c = data[data[currA].rightID];
}
else
{
c = data[data[currB].leftID];
result = ((data[currB].X - data[currA].X)*(c.Y - data[currA].Y) - (data[currB].Y - data[currA].Y)*(c.X - data[currA].X));
if (result <= 0 && data[currB].leftID != currBorig)
{
currB = data[currB].leftID;
c = data[data[currA].rightID];
}
else
{
hullFound = true;
}
}
}
}
}
__global__ void divideAndConquer(Point* data, int numElements)
{
int idx = threadIdx.x;
int bidx = blockIdx.x;
int numElementsPBlock = blockDim.x * 2;
int numThreads = blockDim.x;
//int numBlocks = gridDim.x;
bool isEven = (idx % 2) == 0;
/*
if (idx == 0)
{
printf("%d\n", idx);
printf("%d\n", bidx);
printf("%d\n", numElementsPBlock);
printf("%d\n", numElements);
printf("%d\n", numThreads);
printf("%d\n", numBlocks);
}*/
hullData[idx] = data[idx + (numElementsPBlock * bidx)];
if ((idx + (numElementsPBlock * bidx)) + numThreads < numElements)
hullData[idx + numThreads] = data[(idx + (numElementsPBlock * bidx)) + numThreads];
/*hullData[idx + (2 * blockDim.x)] = data[idx + (2 * blockDim.x)];
hullData[idx + (3 * blockDim.x)] = data[idx + (3 * blockDim.x)];*/
__syncthreads();
if ((idx << 1) + 1 < numElementsPBlock)
{
hullData[(idx << 1)].leftID = (idx << 1) + 1;
hullData[(idx << 1)].rightID = (idx << 1) + 1;
hullData[(idx << 1) + 1].leftID = (idx << 1);
hullData[(idx << 1) + 1].rightID = (idx << 1);
}
else
{
hullData[(idx << 1)].leftID = (idx << 1);
hullData[(idx << 1)].rightID = (idx << 1);
}
//printf("thread: %d\n (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n", idx, hullData[(idx << 1)].X, hullData[(idx << 1)].Y, hullData[(idx << 1)].rightID, hullData[(idx << 1)].leftID, hullData[(idx << 1) + 1].X, hullData[(idx << 1) + 1].Y, hullData[(idx << 1) + 1].rightID, hullData[(idx << 1) + 1].leftID);
/*int currA = startIndex + 1;
int currB = startIndex + 2;
Point c = hullData[hullData[currA].leftID];
if (!isEven)
{
c = hullData[hullData[currA].rightID];
}*/
//int startIndex;
int currA;
int currB;
__syncthreads();
for (int i = 1; i < ((numElementsPBlock + 1) / 2); i *= 2)
{
int index = 4 * i * (idx / 2);
/*if (idx == 0)
printf("-------------------- i = %d --------------------\n", i);
__syncthreads();
if (i == 2 && (idx == 49 || idx == 48))
printf("thread: %d\n %d: (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n %d: (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n", idx, (idx << 1), hullData[(idx << 1)].X, hullData[(idx << 1)].Y, hullData[(idx << 1)].rightID, hullData[(idx << 1)].leftID, ((idx << 1) + 1), hullData[(idx << 1) + 1].X, hullData[(idx << 1) + 1].Y, hullData[(idx << 1) + 1].rightID, hullData[(idx << 1) + 1].leftID);*/
if (index + (i << 1) < numElementsPBlock)
{
currA = index + (i << 1) - 1;
currB = index + (i << 1);
findHull(currA, currB);
}
__syncthreads();
if (index + (i << 1) < numElementsPBlock)
{
if (isEven)
{
hullData[currA].rightID = currB;
hullData[currB].leftID = currA;
}
else
{
hullData[currA].leftID = currB;
hullData[currB].rightID = currA;
}
}
//__syncthreads();
// if (isEven)
// {
// int j = 0;
// int count = 0;
// //printf("%d\n", h_data[0].rightID);
// for (int i=0; i<numElements; i++)
// {
// if (j != 0 || i == 0)
// printf("id: %d %d, %d\n", idx, hullData[j].X, hullData[j].Y);
// else
// break;
// j = hullData[j].rightID;
// count++;
// //system("PAUSE");
//
// }
// __syncthreads();
// printf("\nCount: %d\n", count);
// }
}
__syncthreads();
hullData[idx].rightID = (hullData[idx].rightID + (numElementsPBlock * blockIdx.x));
hullData[idx].leftID = (hullData[idx].leftID + (numElementsPBlock * blockIdx.x));
if (idx + numThreads < numElementsPBlock)
{
hullData[idx + numThreads].rightID = (hullData[idx + numThreads].rightID + (numElementsPBlock * blockIdx.x));
hullData[idx + numThreads].leftID = (hullData[idx + numThreads].leftID + (numElementsPBlock * blockIdx.x));
}
__syncthreads();
data[idx + (numElementsPBlock * bidx)] = hullData[idx];
if ((idx + (numElementsPBlock * bidx)) + numThreads < numElements)
data[(idx + (numElementsPBlock * bidx)) + numThreads] = hullData[idx + numThreads];
__syncthreads();
}
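// Editor's worked example of the merge schedule in the loop above, using a
// hypothetical numElementsPBlock of 8 (4 threads): in round i = 1 the thread
// pair idx = 0/1 gets index = 0 and starts from currA = 1, currB = 2 to merge
// the two-point hulls {0,1} and {2,3}, while idx = 2/3 gets index = 4 and
// merges {4,5} with {6,7}; in round i = 2 only idx = 0/1 stay active,
// starting from currA = 3, currB = 4 to merge {0..3} with {4..7}. Within each
// pair, the even and odd threads walk the neighbour links in opposite
// directions to find the two common tangents.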
__global__ void divideAndConquerBlocks(Point* data, int numElements, int iteration)
{
int idx = threadIdx.x;
//int bidx = blockIdx.x;
bool isEven = (idx % 2) == 0;
int currA;
int currB;
//currA = ((((MAX_ELEMENTS_BLOCK * 2) * ((idx / 2) + 1)) + (MAX_ELEMENTS_BLOCK * (MAX_ELEMENTS_BLOCK / 4) * bidx)) * iteration) - 1;
//currB = ((((MAX_ELEMENTS_BLOCK * 2) * ((idx / 2) + 1)) + (MAX_ELEMENTS_BLOCK * (MAX_ELEMENTS_BLOCK / 4) * bidx)) * iteration);
currA = (((MAX_ELEMENTS_BLOCK * 2 * (idx / 2)) + MAX_ELEMENTS_BLOCK) * iteration) - 1;
currB = (((MAX_ELEMENTS_BLOCK * 2 * (idx / 2)) + MAX_ELEMENTS_BLOCK) * iteration);
//printf("Id: %d Before FindHull--- currA: %d currB: %d\n", idx, currA, currB);
findHull(currA, currB, data);
//printf("Id: %d After FindHull--- currA: %d currB: %d\n", idx, currA, currB);
__syncthreads();
if (isEven)
{
data[currA].rightID = currB;
data[currB].leftID = currA;
}
else
{
data[currA].leftID = currB;
data[currB].rightID = currA;
}
__syncthreads();
}
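// Editor's sketch (verifyHullIsClosed is an illustrative helper, not called
// anywhere in this program): the kernels leave the hull as a circular doubly
// linked list threaded through the leftID/rightID fields, and the printing
// loop in convexHull below walks it via rightID starting at vertex 0. This
// host-side check confirms that such a walk returns to its starting vertex
// within numPoints steps.
static bool verifyHullIsClosed(const Point *points, int numPoints)
{
    int j = 0;
    for (int i = 0; i < numPoints; i++)
    {
        j = (int)points[j].rightID;
        if (j == 0)
            return true;  // walked all the way around the hull
    }
    return false;         // never came back to the starting vertex
}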
int main(int argc, char** argv)
{
FILE* input;
if (argc > 1)
{
input = fopen(argv[1], "r");
}
else
{
input = fopen("sorted_8192.txt", "r");
}
//get number of points
int numPoints;
fscanf(input, "%d", &numPoints);
fscanf(input, "%d", &numPoints);
//printf("%d\n", numPoints);
//system("PAUSE");
h_data = (Point*)malloc(sizeof(Point) * numPoints);
//initialize input
for (int i = 0; i < numPoints; i++){
fscanf(input, "%u %u", &h_data[i].X, &h_data[i].Y); // X and Y are unsigned, so read with %u
}
cudaError_t cudaStatus = convexHull(h_data, numPoints);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "convexHull failed!");
//system("PAUSE");
return 1;
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
//system("PAUSE");
return 1;
}
free(h_data);
//system("PAUSE");
return 0;
}
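// Editor's illustration of the input layout implied by the fscanf calls in
// main (the data files themselves are not part of this listing, so the
// meaning of the first value is an assumption): one leading integer that is
// read and then overwritten, the point count, and one "X Y" pair per point,
// e.g. for four points:
//
//   2
//   4
//   0 0
//   5 1
//   7 4
//   2 6
//
// The default file name "sorted_8192.txt" suggests the points arrive
// pre-sorted.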
// Helper function that runs the parallel divide-and-conquer convex hull on the GPU.
cudaError_t convexHull(Point* h_data, int numPoints)
{
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
cudaStatus = cudaMalloc((void**)&d_data, numPoints * sizeof(Point));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(d_data, h_data, numPoints * sizeof(Point), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
//printf("\n\nNum Threads to be launched: %d\n\n", numThreads);
int numBlocks = 1;
if ((numPoints % MAX_ELEMENTS_BLOCK) == 0)
numBlocks = (numPoints / MAX_ELEMENTS_BLOCK);
else
numBlocks = ((numPoints / MAX_ELEMENTS_BLOCK) + 1);
int numThreads = 1;
if (numBlocks > 1)
numThreads = (MAX_ELEMENTS_BLOCK / 2);
else
numThreads = ((numPoints + 1) / 2);
printf("\n----------Starting first DnC---------\nnumBlocks: %d numThreads: %d\n\n", numBlocks, numThreads);
divideAndConquer<<<numBlocks, numThreads, sizeof(Point) * (numThreads * 2)>>>(d_data, numPoints);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "divideAndConquer launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching divideAndConquer!\n", cudaStatus);
}
int j = 1;
for (int i = 1; i < numBlocks; i *= 2)
{
int newNumBlocks = (numBlocks / i) / (MAX_ELEMENTS_BLOCK / 4);
if (newNumBlocks == 0)
newNumBlocks++;
int newNumThreads = 1;
int num = 1024;
if (newNumBlocks > 1)
newNumThreads = (MAX_ELEMENTS_BLOCK / 2);
else
{
if (numBlocks > 1024)
{
newNumThreads = (num / j);
}
else
{
newNumThreads = (numBlocks / j);
}
j *= 2;
}
printf("\n----------Starting second DnC---------\nnewNumBlocks: %d newNumThreads: %d\n\n", newNumBlocks, newNumThreads);
divideAndConquerBlocks<<<newNumBlocks, newNumThreads>>>(d_data, numPoints, i);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "divideAndConquerBlocks launch failed: %s\n", cudaGetErrorString(cudaStatus));
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching divideAndConquerBlocks!\n", cudaStatus);
}
}
cudaStatus = cudaMemcpy(h_data, d_data, numPoints * sizeof(Point), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
}
cudaFree(d_data);
j = 0;
int count = 0;
//printf("%d\n", h_data[0].rightID);
for (int i=0; i<numPoints; i++)
{
if (j == 0 && i != 0)
break;
printf("j: %d ( %d, %d )\nr: %d l: %d\n\n", j, h_data[j].X, h_data[j].Y, h_data[j].rightID, h_data[j].leftID);
j = h_data[j].rightID;
count++;
//system("PAUSE");
}
printf("\nCount: %d\n", count);
/*for (int i = 0; i < numPoints; i++)
{
printf("%d, %d\n", h_data[i].X, h_data[i].Y);
system("PAUSE");
}*/
return cudaStatus;
}
|
25e3da756ab32ea978318e2686dfd9a322eecd41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: x86-registered-target
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -o - | FileCheck --check-prefixes=COMMON,CHECK %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -disable-O0-optnone -o - | opt -S -O2 | FileCheck %s --check-prefixes=COMMON,OPT
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -x hip %s -o - | FileCheck -check-prefix=HOST %s
#include "Inputs/cuda.h"
// Coerced struct from `struct S` without all generic pointers lowered into
// global ones.
// On the host-side compilation, generic pointers won't be coerced.
// HOST-NOT: %struct.S.coerce
// HOST-NOT: %struct.T.coerce
// HOST: define void @_Z22__device_stub__kernel1Pi(i32* %x)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel1Pi(i32 addrspace(1)*{{.*}} %x.coerce)
// CHECK: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel1(int *x) {
x[0]++;
}
// HOST: define void @_Z22__device_stub__kernel2Ri(i32* nonnull align 4 dereferenceable(4) %x)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel2Ri(i32 addrspace(1)*{{.*}} nonnull align 4 dereferenceable(4) %x.coerce)
// CHECK: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel2(int &x) {
x++;
}
// HOST: define void @_Z22__device_stub__kernel3PU3AS2iPU3AS1i(i32 addrspace(2)* %x, i32 addrspace(1)* %y)
// CHECK-LABEL: define amdgpu_kernel void @_Z7kernel3PU3AS2iPU3AS1i(i32 addrspace(2)*{{.*}} %x, i32 addrspace(1)*{{.*}} %y)
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__global__ void kernel3(__attribute__((address_space(2))) int *x,
__attribute__((address_space(1))) int *y) {
y[0] = x[0];
}
// COMMON-LABEL: define void @_Z4funcPi(i32*{{.*}} %x)
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__device__ void func(int *x) {
x[0]++;
}
struct S {
int *x;
float *y;
};
// `by-val` struct will be coerced into a similar struct with all generic
// pointers lowered into global ones.
// HOST: define void @_Z22__device_stub__kernel41S(i32* %s.coerce0, float* %s.coerce1)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel41S(%struct.S addrspace(4)*{{.*}} byref(%struct.S) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 0
// OPT: [[P0:%.*]] = load i32*, i32* addrspace(4)* [[R0]], align 8
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[V0:%.*]] = load i32, i32* [[P0]], align 4
// OPT: [[INC:%.*]] = add nsw i32 [[V0]], 1
// OPT: store i32 [[INC]], i32* [[P0]], align 4
// OPT: [[V1:%.*]] = load float, float* [[P1]], align 4
// OPT: [[ADD:%.*]] = fadd contract float [[V1]], 1.000000e+00
// OPT: store float [[ADD]], float* [[P1]], align 4
// OPT: ret void
__global__ void kernel4(struct S s) {
s.x[0]++;
s.y[0] += 1.f;
}
// If a pointer to struct is passed, only the pointer itself is coerced into the global one.
// HOST: define void @_Z22__device_stub__kernel5P1S(%struct.S* %s)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel5P1S(%struct.S addrspace(1)*{{.*}} %s.coerce)
__global__ void kernel5(struct S *s) {
s->x[0]++;
s->y[0] += 1.f;
}
struct T {
float *x[2];
};
// `by-val` array is also coerced.
// HOST: define void @_Z22__device_stub__kernel61T(float* %t.coerce0, float* %t.coerce1)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel61T(%struct.T addrspace(4)*{{.*}} byref(%struct.T) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 0
// OPT: [[P0:%.*]] = load float*, float* addrspace(4)* [[R0]], align 8
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[V0:%.*]] = load float, float* [[P0]], align 4
// OPT: [[ADD0:%.*]] = fadd contract float [[V0]], 1.000000e+00
// OPT: store float [[ADD0]], float* [[P0]], align 4
// OPT: [[V1:%.*]] = load float, float* [[P1]], align 4
// OPT: [[ADD1:%.*]] = fadd contract float [[V1]], 2.000000e+00
// OPT: store float [[ADD1]], float* [[P1]], align 4
// OPT: ret void
__global__ void kernel6(struct T t) {
t.x[0][0] += 1.f;
t.x[1][0] += 2.f;
}
// Check that coerced pointers retain the noalias attribute when qualified with __restrict.
// HOST: define void @_Z22__device_stub__kernel7Pi(i32* noalias %x)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel7Pi(i32 addrspace(1)* noalias{{.*}} %x.coerce)
__global__ void kernel7(int *__restrict x) {
x[0]++;
}
// Single element struct.
struct SS {
float *x;
};
// HOST: define void @_Z22__device_stub__kernel82SS(float* %a.coerce)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel82SS(float addrspace(1)*{{.*}} %a.coerce)
// CHECK: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load float, float addrspace(1)* %a.coerce, align 4
// OPT: [[INC:%.*]] = fadd contract float [[VAL]], 3.000000e+00
// OPT: store float [[INC]], float addrspace(1)* %a.coerce, align 4
// OPT: ret void
__global__ void kernel8(struct SS a) {
*a.x += 3.f;
}
| 25e3da756ab32ea978318e2686dfd9a322eecd41.cu | // REQUIRES: x86-registered-target
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -o - | FileCheck --check-prefixes=COMMON,CHECK %s
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -emit-llvm -x hip %s -disable-O0-optnone -o - | opt -S -O2 | FileCheck %s --check-prefixes=COMMON,OPT
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -x hip %s -o - | FileCheck -check-prefix=HOST %s
#include "Inputs/cuda.h"
// Coerced struct from `struct S` without all generic pointers lowered into
// global ones.
// On the host-side compilation, generic pointers won't be coerced.
// HOST-NOT: %struct.S.coerce
// HOST-NOT: %struct.T.coerce
// HOST: define void @_Z22__device_stub__kernel1Pi(i32* %x)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel1Pi(i32 addrspace(1)*{{.*}} %x.coerce)
// CHECK: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel1(int *x) {
x[0]++;
}
// HOST: define void @_Z22__device_stub__kernel2Ri(i32* nonnull align 4 dereferenceable(4) %x)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel2Ri(i32 addrspace(1)*{{.*}} nonnull align 4 dereferenceable(4) %x.coerce)
// CHECK: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load i32, i32 addrspace(1)* %x.coerce, align 4
// OPT: [[INC:%.*]] = add nsw i32 [[VAL]], 1
// OPT: store i32 [[INC]], i32 addrspace(1)* %x.coerce, align 4
// OPT: ret void
__global__ void kernel2(int &x) {
x++;
}
// HOST: define void @_Z22__device_stub__kernel3PU3AS2iPU3AS1i(i32 addrspace(2)* %x, i32 addrspace(1)* %y)
// CHECK-LABEL: define amdgpu_kernel void @_Z7kernel3PU3AS2iPU3AS1i(i32 addrspace(2)*{{.*}} %x, i32 addrspace(1)*{{.*}} %y)
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__global__ void kernel3(__attribute__((address_space(2))) int *x,
__attribute__((address_space(1))) int *y) {
y[0] = x[0];
}
// COMMON-LABEL: define void @_Z4funcPi(i32*{{.*}} %x)
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
__device__ void func(int *x) {
x[0]++;
}
struct S {
int *x;
float *y;
};
// `by-val` struct will be coerced into a similar struct with all generic
// pointers lowered into global ones.
// HOST: define void @_Z22__device_stub__kernel41S(i32* %s.coerce0, float* %s.coerce1)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel41S(%struct.S addrspace(4)*{{.*}} byref(%struct.S) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 0
// OPT: [[P0:%.*]] = load i32*, i32* addrspace(4)* [[R0]], align 8
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.S, %struct.S addrspace(4)* %0, i64 0, i32 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[V0:%.*]] = load i32, i32* [[P0]], align 4
// OPT: [[INC:%.*]] = add nsw i32 [[V0]], 1
// OPT: store i32 [[INC]], i32* [[P0]], align 4
// OPT: [[V1:%.*]] = load float, float* [[P1]], align 4
// OPT: [[ADD:%.*]] = fadd contract float [[V1]], 1.000000e+00
// OPT: store float [[ADD]], float* [[P1]], align 4
// OPT: ret void
__global__ void kernel4(struct S s) {
s.x[0]++;
s.y[0] += 1.f;
}
// If a pointer to struct is passed, only the pointer itself is coerced into the global one.
// HOST: define void @_Z22__device_stub__kernel5P1S(%struct.S* %s)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel5P1S(%struct.S addrspace(1)*{{.*}} %s.coerce)
__global__ void kernel5(struct S *s) {
s->x[0]++;
s->y[0] += 1.f;
}
struct T {
float *x[2];
};
// `by-val` array is also coerced.
// HOST: define void @_Z22__device_stub__kernel61T(float* %t.coerce0, float* %t.coerce1)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel61T(%struct.T addrspace(4)*{{.*}} byref(%struct.T) align 8 %0)
// OPT: [[R0:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 0
// OPT: [[P0:%.*]] = load float*, float* addrspace(4)* [[R0]], align 8
// OPT: [[R1:%.*]] = getelementptr inbounds %struct.T, %struct.T addrspace(4)* %0, i64 0, i32 0, i64 1
// OPT: [[P1:%.*]] = load float*, float* addrspace(4)* [[R1]], align 8
// OPT: [[V0:%.*]] = load float, float* [[P0]], align 4
// OPT: [[ADD0:%.*]] = fadd contract float [[V0]], 1.000000e+00
// OPT: store float [[ADD0]], float* [[P0]], align 4
// OPT: [[V1:%.*]] = load float, float* [[P1]], align 4
// OPT: [[ADD1:%.*]] = fadd contract float [[V1]], 2.000000e+00
// OPT: store float [[ADD1]], float* [[P1]], align 4
// OPT: ret void
__global__ void kernel6(struct T t) {
t.x[0][0] += 1.f;
t.x[1][0] += 2.f;
}
// Check that coerced pointers retain the noalias attribute when qualified with __restrict.
// HOST: define void @_Z22__device_stub__kernel7Pi(i32* noalias %x)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel7Pi(i32 addrspace(1)* noalias{{.*}} %x.coerce)
__global__ void kernel7(int *__restrict x) {
x[0]++;
}
// Single element struct.
struct SS {
float *x;
};
// HOST: define void @_Z22__device_stub__kernel82SS(float* %a.coerce)
// COMMON-LABEL: define amdgpu_kernel void @_Z7kernel82SS(float addrspace(1)*{{.*}} %a.coerce)
// CHECK: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// CHECK-NOT: = addrspacecast [[TYPE:.*]] addrspace(1)* %{{.*}} to [[TYPE]]*
// OPT: [[VAL:%.*]] = load float, float addrspace(1)* %a.coerce, align 4
// OPT: [[INC:%.*]] = fadd contract float [[VAL]], 3.000000e+00
// OPT: store float [[INC]], float addrspace(1)* %a.coerce, align 4
// OPT: ret void
__global__ void kernel8(struct SS a) {
*a.x += 3.f;
}
|
caf0abeb5fc0955699eb7017afe22782224e72d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fractal3d.h"
/* fractal3d.cu -- part of the CUDA fractal3d implementation
*
* Copyright (C) 2015 Alrik Firl
*
* This software may be modified and distributed under the terms
* of the MIT license. See the LICENSE file for details.
*/
__device__ float4 juliabulb(const float3 dim_limits, const float r, const float theta, const float phi)
{
float4 out_coords;
out_coords.w = r * r * r * r * r * r * r * r;
out_coords.x = dim_limits.x + out_coords.w * cos(theta) * cos(phi) + 0.353;
out_coords.y = dim_limits.y + out_coords.w * sin(theta) * cos(phi) + 0.288;
out_coords.z = dim_limits.z + out_coords.w * sin(phi) + 0.2;
return out_coords;
}
__device__ float4 mandelbulb(const float3 dim_limits, const float r, const float theta, const float phi)
{
float4 out_coords;
out_coords.w = r * r * r * r * r * r * r * r;
out_coords.x = dim_limits.x + out_coords.w * cos(theta) * cos(phi);
out_coords.y = dim_limits.y + out_coords.w * sin(theta) * cos(phi);
out_coords.z = dim_limits.z + out_coords.w * sin(phi);
return out_coords;
}
//TODO: implement some other fractal functions here....
template <typename pixel_t, int FRACTAL_ID>
__global__ void fractal3d_kernel (pixel_t* image,
const int depth_idx,
const int4 dimensions,
const int2 INT_CONSTANTS,
const float4 FLT_CONSTANTS)
{
const float MIN_LIMIT = FLT_CONSTANTS.x;
const float MAX_LIMIT = FLT_CONSTANTS.y;
const int ORDER = INT_CONSTANTS.y;
const int index_x = blockIdx.x * blockDim.x + threadIdx.x;
const int index_y = blockIdx.y * blockDim.y + threadIdx.y;
float3 dim_limits;
dim_limits.x = MIN_LIMIT + index_x * ((MAX_LIMIT - MIN_LIMIT) / dimensions.x);
dim_limits.y = MIN_LIMIT + index_y * ((MAX_LIMIT - MIN_LIMIT) / dimensions.y);
dim_limits.z = MIN_LIMIT + depth_idx * ((MAX_LIMIT - MIN_LIMIT) / dimensions.z);
float4 coords = (float4) {0.0f, 0.0f, 0.0f, 0.0f};
float r = 0.0f;
float theta = 0.0f;
float phi = 0.0f;
pixel_t iter_num = 0;
for (iter_num = 0; iter_num < INT_CONSTANTS.x; ++iter_num)
{
r = sqrt(coords.x * coords.x + coords.y * coords.y + coords.z * coords.z);
if(r > FLT_CONSTANTS.z)
break;
theta = ORDER * atan2(sqrt(coords.x * coords.x + coords.y * coords.y), coords.z);
phi = ORDER * atan2(coords.x, coords.y);
//use the given fractal type
switch(FRACTAL_ID)
{
case MANDELBROT:
{
coords = mandelbulb(dim_limits, r, theta, phi);
break;
}
case JULIA:
{
coords = juliabulb(dim_limits, r, theta, phi);
break;
}
}
}
image[index_y * dimensions.x + index_x] = max(0, iter_num-1);
}
//--------------------------------------------------------------------------------------------------------------------------------
template <typename pixel_t, int FRACTAL_ID> __host__
void run_fractalgen(pixel_t* dev_image, int depth_idx, const int4 dimensions, const int2 constants, const float4 flt_constants)
{
static constexpr int blockdim = 16;
//want to process a frame per kernel invocation -- frames will be something like [128 x 128], [512 x 512], [1024 x 1024], etc.
dim3 block_dim (blockdim, blockdim);
dim3 grid_dim (static_cast<int>(::ceil(dimensions.x / static_cast<float>(blockdim))), static_cast<int>(::ceil(dimensions.y / static_cast<float>(blockdim))));
hipLaunchKernelGGL(( fractal3d_kernel<pixel_t, FRACTAL_ID>), dim3(grid_dim), dim3(block_dim), 0, 0, dev_image, depth_idx, dimensions, constants, flt_constants);
}
template __host__ void run_fractalgen <unsigned char, 0> (unsigned char*, int, const int4, const int2, const float4);
template __host__ void run_fractalgen <unsigned char, 1> (unsigned char*, int, const int4, const int2, const float4);
| caf0abeb5fc0955699eb7017afe22782224e72d5.cu | #include "fractal3d.h"
/* fractal3d.cu -- part of the CUDA fractal3d implementation
*
* Copyright (C) 2015 Alrik Firl
*
* This software may be modified and distributed under the terms
* of the MIT license. See the LICENSE file for details.
*/
__device__ float4 juliabulb(const float3 dim_limits, const float r, const float theta, const float phi)
{
float4 out_coords;
out_coords.w = r * r * r * r * r * r * r * r;
out_coords.x = dim_limits.x + out_coords.w * cos(theta) * cos(phi) + 0.353;
out_coords.y = dim_limits.y + out_coords.w * sin(theta) * cos(phi) + 0.288;
out_coords.z = dim_limits.z + out_coords.w * sin(phi) + 0.2;
return out_coords;
}
__device__ float4 mandelbulb(const float3 dim_limits, const float r, const float theta, const float phi)
{
float4 out_coords;
out_coords.w = r * r * r * r * r * r * r * r;
out_coords.x = dim_limits.x + out_coords.w * cos(theta) * cos(phi);
out_coords.y = dim_limits.y + out_coords.w * sin(theta) * cos(phi);
out_coords.z = dim_limits.z + out_coords.w * sin(phi);
return out_coords;
}
//TODO: implement some other fractal functions here....
template <typename pixel_t, int FRACTAL_ID>
__global__ void fractal3d_kernel (pixel_t* image,
const int depth_idx,
const int4 dimensions,
const int2 INT_CONSTANTS,
const float4 FLT_CONSTANTS)
{
const float MIN_LIMIT = FLT_CONSTANTS.x;
const float MAX_LIMIT = FLT_CONSTANTS.y;
const int ORDER = INT_CONSTANTS.y;
const int index_x = blockIdx.x * blockDim.x + threadIdx.x;
const int index_y = blockIdx.y * blockDim.y + threadIdx.y;
float3 dim_limits;
dim_limits.x = MIN_LIMIT + index_x * ((MAX_LIMIT - MIN_LIMIT) / dimensions.x);
dim_limits.y = MIN_LIMIT + index_y * ((MAX_LIMIT - MIN_LIMIT) / dimensions.y);
dim_limits.z = MIN_LIMIT + depth_idx * ((MAX_LIMIT - MIN_LIMIT) / dimensions.z);
float4 coords = (float4) {0.0f, 0.0f, 0.0f, 0.0f};
float r = 0.0f;
float theta = 0.0f;
float phi = 0.0f;
pixel_t iter_num = 0;
for (iter_num = 0; iter_num < INT_CONSTANTS.x; ++iter_num)
{
r = sqrt(coords.x * coords.x + coords.y * coords.y + coords.z * coords.z);
if(r > FLT_CONSTANTS.z)
break;
theta = ORDER * atan2(sqrt(coords.x * coords.x + coords.y * coords.y), coords.z);
phi = ORDER * atan2(coords.x, coords.y);
//use the given fractal type
switch(FRACTAL_ID)
{
case MANDELBROT:
{
coords = mandelbulb(dim_limits, r, theta, phi);
break;
}
case JULIA:
{
coords = juliabulb(dim_limits, r, theta, phi);
break;
}
}
}
image[index_y * dimensions.x + index_x] = max(0, iter_num-1);
}
//--------------------------------------------------------------------------------------------------------------------------------
template <typename pixel_t, int FRACTAL_ID> __host__
void run_fractalgen(pixel_t* dev_image, int depth_idx, const int4 dimensions, const int2 constants, const float4 flt_constants)
{
static constexpr int blockdim = 16;
//want to process a frame per kernel invocation -- frames will be something like [128 x 128], [512 x 512], [1024 x 1024], etc.
dim3 block_dim (blockdim, blockdim);
dim3 grid_dim (static_cast<int>(std::ceil(dimensions.x / static_cast<float>(blockdim))), static_cast<int>(std::ceil(dimensions.y / static_cast<float>(blockdim))));
fractal3d_kernel<pixel_t, FRACTAL_ID><<<grid_dim, block_dim>>> (dev_image, depth_idx, dimensions, constants, flt_constants);
}
template __host__ void run_fractalgen <unsigned char, 0> (unsigned char*, int, const int4, const int2, const float4);
template __host__ void run_fractalgen <unsigned char, 1> (unsigned char*, int, const int4, const int2, const float4);
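// Editor's usage sketch (an assumption about how a caller might drive this
// translation unit; renderSliceExample and its buffer handling are
// illustrative, and whether fractal id 0 means MANDELBROT or JULIA depends on
// the enum in fractal3d.h): allocate one x-by-y slice on the device, render a
// single depth index through run_fractalgen, then copy the result back.
static void renderSliceExample(unsigned char *h_slice, int depth_idx,
                               const int4 dims, const int2 int_consts,
                               const float4 flt_consts)
{
    unsigned char *d_slice = nullptr;
    const size_t bytes = size_t(dims.x) * size_t(dims.y) * sizeof(unsigned char);
    cudaMalloc((void **)&d_slice, bytes);
    // Uses the <unsigned char, 0> instantiation declared above
    run_fractalgen<unsigned char, 0>(d_slice, depth_idx, dims, int_consts, flt_consts);
    cudaMemcpy(h_slice, d_slice, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_slice);
}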
|
481133cc3969f04e2af36172dfa852b576966c51.hip | // !!! This is a file automatically generated by hipify!!!
#include "CTimer.h"
CTimer::CTimer(bool _start_timer_){
hipEventCreate(&_start);
hipEventCreate(&_stop);
if(_start_timer_ == true){
hipEventRecord(_start, 0);
isRunning = true;
}else{
isRunning = false;
}
}
CTimer::CTimer(){
hipEventCreate(&_start);
hipEventCreate(&_stop);
isRunning = false;
}
CTimer::~CTimer(){
hipEventDestroy(_start);
hipEventDestroy(_stop);
}
void CTimer::start(){
if( isRunning == true )
{
cout << "(EE) CTimer :: trying to start a CTimer object that is already running !" << endl;
}
else
{
isRunning = true;
hipEventRecord(_start, 0);
}
}
void CTimer::stop(){
cout << "CTimer::stop()" << endl;
if( isRunning == false ){
cout << "(EE) CTimer :: trying to stop a CTimer object that is not running !" << endl;
}else{
hipEventRecord(_stop, 0);
isRunning = false;
}
}
void CTimer::reset(){
hipEventRecord(_start, 0);
}
long CTimer::get_time_ns(){
float elapsedTime;
if( isRunning == true ){
hipEventRecord(_stop, 0);
hipEventSynchronize(_stop);
}
hipEventElapsedTime(&elapsedTime, _start, _stop); // that's our time!
return (long)(1000.0 * 1000.0 * elapsedTime);
}
long CTimer::get_time_us(){
float elapsedTime;
if( isRunning == true ){
hipEventRecord(_stop, 0);
hipEventSynchronize(_stop);
}
hipEventElapsedTime(&elapsedTime, _start, _stop); // that's our time!
return (long)(1000.0 * elapsedTime);
}
long CTimer::get_time_ms(){
float elapsedTime;
if( isRunning == true ){
hipEventRecord(_stop, 0);
hipEventSynchronize(_stop);
}
hipEventElapsedTime(&elapsedTime, _start, _stop); // that's our time!
return (long)(elapsedTime);
}
long CTimer::get_time_sec(){
return (long)(get_time_ms() / 1000.0);
}
| 481133cc3969f04e2af36172dfa852b576966c51.cu |
#include "CTimer.h"
CTimer::CTimer(bool _start_timer_){
cudaEventCreate(&_start);
cudaEventCreate(&_stop);
if(_start_timer_ == true){
cudaEventRecord(_start, 0);
isRunning = true;
}else{
isRunning = false;
}
}
CTimer::CTimer(){
cudaEventCreate(&_start);
cudaEventCreate(&_stop);
isRunning = false;
}
CTimer::~CTimer(){
cudaEventDestroy(_start);
cudaEventDestroy(_stop);
}
void CTimer::start(){
if( isRunning == true )
{
cout << "(EE) CTimer :: trying to start a CTimer object that is already running !" << endl;
}
else
{
isRunning = true;
cudaEventRecord(_start, 0);
}
}
void CTimer::stop(){
cout << "CTimer::stop()" << endl;
if( isRunning == false ){
cout << "(EE) CTimer :: trying to stop a CTimer object that is not running !" << endl;
}else{
cudaEventRecord(_stop, 0);
isRunning = false;
}
}
void CTimer::reset(){
cudaEventRecord(_start, 0);
}
long CTimer::get_time_ns(){
float elapsedTime;
if( isRunning == true ){
cudaEventRecord(_stop, 0);
cudaEventSynchronize(_stop);
}
cudaEventElapsedTime(&elapsedTime, _start, _stop); // that's our time!
return (long)(1000.0 * 1000.0 * elapsedTime);
}
long CTimer::get_time_us(){
float elapsedTime;
if( isRunning == true ){
cudaEventRecord(_stop, 0);
cudaEventSynchronize(_stop);
}
cudaEventElapsedTime(&elapsedTime, _start, _stop); // that's our time!
return (long)(1000.0 * elapsedTime);
}
long CTimer::get_time_ms(){
float elapsedTime;
if( isRunning == true ){
cudaEventRecord(_stop, 0);
cudaEventSynchronize(_stop);
}
cudaEventElapsedTime(&elapsedTime, _start, _stop); // that's our time!
return (long)(elapsedTime);
}
long CTimer::get_time_sec(){
return (long)(get_time_ms() / 1000.0);
}
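// Editor's usage sketch (kernelToTime and its launch configuration are
// placeholders, not symbols defined in this file): because the get_time_*
// methods record and synchronize on the stop event themselves while the
// timer is still running, a minimal timing of one asynchronous launch can
// skip the explicit stop() call:
//
//   CTimer timer(true);                     // creates both events, records the start
//   kernelToTime<<<grid, block>>>(args);    // asynchronous kernel launch
//   long elapsed_us = timer.get_time_us();  // records the stop event and waits on it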
|
ad15260a4276a85601137a10fa3eaa1172ef62aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <cutil_inline.h> // includes cuda.h and hip/hip_runtime_api.h
#include <cutil_math.h>
#include <volumeRender.h>
// random ray offset texture, didn't pay off much in this demo
//#define VOLUMERENDER_RANDSIZE 256
#define VOLUMERENDER_TFS 2
#define VOLUMERENDER_TF_PREINTSIZE 1024
#define VOLUMERENDER_TF_PREINTSTEPS 1024
#define VOLUMERENDER_TF_PREINTRAY 4
enum TFMode{
TF_SINGLE_1D = 0, // single 1D TF for everything
TF_LAYERED_2D_PREINT = 1, // layered 2D TF uses pre-integration
TF_LAYERED_2D = 2, // layered 2D TF without pre-integration behavior
};
typedef unsigned int uint;
typedef unsigned char uchar;
static bool usePreInt = true;
static hipArray *d_transferIntegrate = 0;
static hipArray *d_transferFunc = 0;
static hipArray *d_transferArray = 0;
#ifdef VOLUMERENDER_RANDSIZE
// 2D ray offsets
static hipArray *d_rayArray = 0;
texture<uchar, 2, hipReadModeNormalizedFloat> rayTex;
#endif
// 3D texture
texture<VolumeType, 3, hipReadModeNormalizedFloat> volumeTex;
// 1D transfer function texture
texture<float4, 1, hipReadModeElementType> transferTex;
// 1D transfer integration texture
texture<float4, 1, hipReadModeElementType> transferIntegrateTex;
surface<void, 1> transferIntegrateSurf;
// 2D layered preintegrated transfer function texture
texture<float4, hipTextureType2DLayered,hipReadModeElementType> transferLayerPreintTex;
surface<void, cudaSurfaceType2DLayered> transferLayerPreintSurf;
typedef struct {
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray {
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
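// The test above is the classic "slab" method: the ray enters the box at the largest
// per-axis entry distance and leaves at the smallest per-axis exit distance, and it
// hits the box only if the exit distance exceeds the entry distance. The division by
// r.d relies on IEEE infinity semantics for rays parallel to a slab.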
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
template <int TFMODE >
__device__ void
d_render(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float transferWeight = 0.0f)
{
const float rayscale = float(TFMODE != TF_SINGLE_1D ? VOLUMERENDER_TF_PREINTRAY : 1);
const int maxSteps = 512;
const float tstep = 0.01f * rayscale;
const float opacityThreshold = 0.95f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
density *= rayscale;
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
#ifdef VOLUMERENDER_RANDSIZE
float offset = (tex2D(rayTex,u,v));
pos += step * offset;
#endif
float lastsample = 0;
//lastsample = (lastsample-transferOffset)*transferScale;
for(int i=0; i<maxSteps; i++) {
// read from 3D texture
// remap position to [0, 1] coordinates
float3 coord = make_float3(pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
float sample = tex3D(volumeTex, coord.x, coord.y, coord.z);
//sample = (sample-transferOffset)*transferScale;
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
float4 col;
int tfid = (pos.x < 0);
if (TFMODE != TF_SINGLE_1D){
col = tex2DLayered(transferLayerPreintTex, sample, TFMODE==TF_LAYERED_2D ? sample : lastsample, tfid);
col.w *= density;
lastsample = sample;
}
else{
col = tex1D(transferTex, sample);
col.w *= 0;
}
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col*(1.0f - sum.w);
// exit early if opaque
if (sum.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum);
}
__global__ void
d_render_regular(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float transferWeight = 0.0f)
{
d_render<TF_SINGLE_1D>(d_output,imageW,imageH,density,brightness,transferOffset,transferScale,transferWeight);
}
__global__ void
d_render_preint(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float transferWeight = 0.0f)
{
d_render<TF_LAYERED_2D_PREINT>(d_output,imageW,imageH,density,brightness,transferOffset,transferScale,transferWeight);
}
__global__ void
d_render_preint_off(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float transferWeight = 0.0f)
{
d_render<TF_LAYERED_2D>(d_output,imageW,imageH,density,brightness,transferOffset,transferScale,transferWeight);
}
//////////////////////////////////////////////////////////////////////////
__global__ void
d_integrate_trapezoidal(hipExtent extent){
uint x = blockIdx.x*blockDim.x + threadIdx.x;
// for higher speed could use hierarchical approach for sum
if (x >= extent.width){
return;
}
float stepsize = 1.0/float(extent.width-1);
float to = float(x) * stepsize;
float4 outclr = make_float4(0,0,0,0);
float incr = stepsize;
float4 lastval = tex1D(transferTex,0);
float cur = incr;
while (cur < to + incr * 0.5){
float4 val = tex1D(transferTex,cur);
float4 trapezoid = (lastval+val)/2.0f;
lastval = val;
outclr += trapezoid;
cur += incr;
}
surf1Dwrite(outclr,transferIntegrateSurf,x * sizeof(float4));
}
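// Each thread x accumulates the running trapezoidal-rule sum of the 1D transfer
// function from 0 up to x / (width - 1) and writes it into the integration surface;
// d_preintegrate below then takes differences of these partial sums, so the table only
// needs to be rebuilt when the transfer function itself changes.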
__global__ void
d_preintegrate(int layer, float steps, hipExtent extent)
{
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= extent.width || y >= extent.height){
return;
}
float sx = float(x)/float(extent.width);
float sy = float(y)/float(extent.height);
float smax = max(sx,sy);
float smin = min(sx,sy);
float4 iv;
if (x != y){
// assumes square textures!
float fracc = smax - smin;
fracc = 1.0 /(fracc*steps);
float4 intmax = tex1D(transferIntegrateTex,smax);
float4 intmin = tex1D(transferIntegrateTex,smin);
iv.x = (intmax.x - intmin.x)*fracc;
iv.y = (intmax.y - intmin.y)*fracc;
iv.z = (intmax.z - intmin.z)*fracc;
//iv.w = (intmax.w - intmin.w)*fracc;
iv.w = (1.0 - exp( -(intmax.w - intmin.w) * fracc ));
}
else
{
float4 sample = tex1D(transferTex,smin);
iv.x = sample.x;
iv.y = sample.y;
iv.z = sample.z;
//iv.w = sample.w;
iv.w = (1.0 - exp(-sample.w));
}
iv.x = __saturatef(iv.x);
iv.y = __saturatef(iv.y);
iv.z = __saturatef(iv.z);
iv.w = __saturatef(iv.w);
surf2DLayeredwrite(iv,transferLayerPreintSurf, x * sizeof(float4), y, layer);
}
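// For every (front, back) sample pair the pre-integrated table stores the average
// colour of the transfer function between the two values,
// (I(smax) - I(smin)) / ((smax - smin) * steps), and an opacity 1 - exp(-integral),
// approximating the volume-rendering integral over one ray segment; the diagonal
// (front == back) falls back to a direct transfer-function lookup.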
//////////////////////////////////////////////////////////////////////////
void VolumeRender_setTextureFilterMode(bool bLinearFilter)
{
volumeTex.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint;
}
void VolumeRender_setVolume(const Volume* vol)
{
cutilSafeCall (hipBindTextureToArray(volumeTex, vol->content, vol->channelDesc));
}
static int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
void VolumeRender_updateTF(int tfIdx, int numColors, float4* colors)
{
if (d_transferFunc){
cutilSafeCall(hipFreeArray(d_transferFunc));
d_transferFunc = 0;
}
hipChannelFormatDesc channelFloat4 = hipCreateChannelDesc<float4>();
cutilSafeCall(hipMallocArray( &d_transferFunc, &channelFloat4, numColors, 1));
cutilSafeCall(hipMemcpyToArray( d_transferFunc, 0, 0, colors, sizeof(float4)*numColors, hipMemcpyHostToDevice));
// Bind the array to the texture
cutilSafeCall( hipBindTextureToArray( transferTex, d_transferFunc, channelFloat4));
if (tfIdx < 0 || tfIdx >= VOLUMERENDER_TFS){
return;
}
{
hipExtent extent = {VOLUMERENDER_TF_PREINTSTEPS, 0,0};
dim3 blockSize(32,1,1);
dim3 gridSize(iDivUp(extent.width,blockSize.x),1,1);
hipLaunchKernelGGL(( d_integrate_trapezoidal), dim3(gridSize), dim3(blockSize), 0, 0, extent );
}
{
hipExtent extent = {VOLUMERENDER_TF_PREINTSIZE, VOLUMERENDER_TF_PREINTSIZE,VOLUMERENDER_TFS};
dim3 blockSize(16,16,1);
dim3 gridSize(iDivUp(extent.width,blockSize.x),iDivUp(extent.height,blockSize.y),1);
hipLaunchKernelGGL(( d_preintegrate), dim3(gridSize), dim3(blockSize), 0, 0, tfIdx, float(VOLUMERENDER_TF_PREINTSTEPS), extent );
}
}
void VolumeRender_init()
{
#ifdef VOLUMERENDER_RANDSIZE
// random ray offsets to fight aliasing
srand(128123);
uchar* randoms = new uchar[VOLUMERENDER_RANDSIZE*VOLUMERENDER_RANDSIZE];
for (int i = 0; i < VOLUMERENDER_RANDSIZE*VOLUMERENDER_RANDSIZE; i++){
randoms[i] = rand();
}
hipChannelFormatDesc channelUchar = hipCreateChannelDesc<uchar>();
cutilSafeCall(hipMallocArray( &d_rayArray, &channelUchar, VOLUMERENDER_RANDSIZE, VOLUMERENDER_RANDSIZE));
cutilSafeCall(hipMemcpyToArray( d_rayArray, 0, 0, randoms, sizeof(uchar)*VOLUMERENDER_RANDSIZE*VOLUMERENDER_RANDSIZE, hipMemcpyHostToDevice));
delete [] randoms;
rayTex.normalized = 1;
rayTex.filterMode = hipFilterModeLinear; // linear interpolation
rayTex.addressMode[0] = hipAddressModeWrap;
rayTex.addressMode[1] = hipAddressModeWrap;
cutilSafeCall( hipBindTextureToArray( rayTex, d_rayArray, channelUchar));
#endif
// set texture parameters
volumeTex.normalized = true; // access with normalized texture coordinates
volumeTex.filterMode = hipFilterModeLinear; // linear interpolation
volumeTex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
volumeTex.addressMode[1] = hipAddressModeClamp;
volumeTex.addressMode[2] = hipAddressModeClamp;
transferTex.normalized = true;
transferTex.filterMode = hipFilterModeLinear;
transferTex.addressMode[0] = hipAddressModeClamp;
transferLayerPreintTex.normalized = true;
transferLayerPreintTex.filterMode = hipFilterModeLinear;
transferLayerPreintTex.addressMode[0] = hipAddressModeClamp;
transferLayerPreintTex.addressMode[1] = hipAddressModeClamp;
hipChannelFormatDesc channelFloat4 = hipCreateChannelDesc<float4>();
hipExtent extent = {VOLUMERENDER_TF_PREINTSIZE, VOLUMERENDER_TF_PREINTSIZE,VOLUMERENDER_TFS};
cutilSafeCall( hipMalloc3DArray( &d_transferArray, &channelFloat4, extent, hipArrayLayered | hipArraySurfaceLoadStore));
cutilSafeCall( hipBindTextureToArray( transferLayerPreintTex, d_transferArray, channelFloat4));
cutilSafeCall( hipBindSurfaceToArray( transferLayerPreintSurf, d_transferArray, channelFloat4));
transferIntegrateTex.normalized = true;
transferIntegrateTex.filterMode = hipFilterModeLinear;
transferIntegrateTex.addressMode[0] = hipAddressModeClamp;
transferIntegrateTex.addressMode[1] = hipAddressModeClamp;
transferIntegrateTex.addressMode[2] = hipAddressModeClamp;
cutilSafeCall( hipMallocArray( &d_transferIntegrate, &channelFloat4, VOLUMERENDER_TF_PREINTSTEPS,0,hipArraySurfaceLoadStore));
cutilSafeCall( hipBindTextureToArray( transferIntegrateTex, d_transferIntegrate, channelFloat4));
cutilSafeCall( hipBindSurfaceToArray( transferIntegrateSurf, d_transferIntegrate, channelFloat4));
// create transfer function texture
float4 transferFunc0[] = {
{ 0.0, 0.0, 0.0, 0.0, },
{ 1.0, 0.0, 0.0, 1.0, },
{ 1.0, 0.5, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 0.0, 0.0, },
};
float4 transferFunc1[] = {
{ 0.0, 0.0, 0.0, 0.0, },
{ 0.0, 1.0, 0.0, 0.125, },
{ 0.0, 0.5, 1.0, 0.125, },
{ 0.0, 1.0, 1.0, 0.125, },
{ 0.0, 1.0, 0.0, 0.125, },
{ 0.25, 0.75, 0.0, 1.0, },
{ 0.75, 0.25, 0.0, 0.125, },
{ 1.0, 0.75, 0.0, 0.125, },
{ 0.0, 0.0, 0.0, 0.0, },
};
VolumeRender_updateTF(1,sizeof(transferFunc1)/sizeof(float4),transferFunc1);
VolumeRender_updateTF(0,sizeof(transferFunc0)/sizeof(float4),transferFunc0);
}
void VolumeRender_deinit()
{
cutilSafeCall(hipFreeArray(d_transferFunc));
cutilSafeCall(hipFreeArray(d_transferArray));
cutilSafeCall(hipFreeArray(d_transferIntegrate));
d_transferArray = 0;
d_transferFunc = 0;
d_transferIntegrate = 0;
#ifdef VOLUMERENDER_RANDSIZE
cutilSafeCall(hipFreeArray(d_rayArray));
d_rayArray = 0;
#endif
}
void VolumeRender_setPreIntegrated(int state){
usePreInt = !!state;
}
void VolumeRender_render(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH,
float density, float brightness, float transferOffset, float transferScale)
{
if (usePreInt){
hipLaunchKernelGGL(( d_render_preint), dim3(gridSize), dim3(blockSize), 0, 0, d_output, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
else{
hipLaunchKernelGGL(( d_render_preint_off), dim3(gridSize), dim3(blockSize), 0, 0, d_output, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
}
void VolumeRender_copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
cutilSafeCall( hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) );
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
| ad15260a4276a85601137a10fa3eaa1172ef62aa.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <cutil_inline.h> // includes cuda.h and cuda_runtime_api.h
#include <cutil_math.h>
#include <volumeRender.h>
// random ray offset texture, didn't pay off much in this demo
//#define VOLUMERENDER_RANDSIZE 256
#define VOLUMERENDER_TFS 2
#define VOLUMERENDER_TF_PREINTSIZE 1024
#define VOLUMERENDER_TF_PREINTSTEPS 1024
#define VOLUMERENDER_TF_PREINTRAY 4
enum TFMode{
TF_SINGLE_1D = 0, // single 1D TF for everything
TF_LAYERED_2D_PREINT = 1, // layered 2D TF uses pre-integration
TF_LAYERED_2D = 2, // layered 2D TF without pre-integration behavior
};
typedef unsigned int uint;
typedef unsigned char uchar;
static bool usePreInt = true;
static cudaArray *d_transferIntegrate = 0;
static cudaArray *d_transferFunc = 0;
static cudaArray *d_transferArray = 0;
#ifdef VOLUMERENDER_RANDSIZE
// 2D ray offsets
static cudaArray *d_rayArray = 0;
texture<uchar, 2, cudaReadModeNormalizedFloat> rayTex;
#endif
// 3D texture
texture<VolumeType, 3, cudaReadModeNormalizedFloat> volumeTex;
// 1D transfer function texture
texture<float4, 1, cudaReadModeElementType> transferTex;
// 1D transfer integration texture
texture<float4, 1, cudaReadModeElementType> transferIntegrateTex;
surface<void, 1> transferIntegrateSurf;
// 2D layered preintegrated transfer function texture
texture<float4, cudaTextureType2DLayered,cudaReadModeElementType> transferLayerPreintTex;
surface<void, cudaSurfaceType2DLayered> transferLayerPreintSurf;
typedef struct {
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray {
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
template <int TFMODE >
__device__ void
d_render(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float transferWeight = 0.0f)
{
const float rayscale = float(TFMODE != TF_SINGLE_1D ? VOLUMERENDER_TF_PREINTRAY : 1);
const int maxSteps = 512;
const float tstep = 0.01f * rayscale;
const float opacityThreshold = 0.95f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
density *= rayscale;
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
#ifdef VOLUMERENDER_RANDSIZE
float offset = (tex2D(rayTex,u,v));
pos += step * offset;
#endif
float lastsample = 0;
//lastsample = (lastsample-transferOffset)*transferScale;
for(int i=0; i<maxSteps; i++) {
// read from 3D texture
// remap position to [0, 1] coordinates
float3 coord = make_float3(pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
float sample = tex3D(volumeTex, coord.x, coord.y, coord.z);
//sample = (sample-transferOffset)*transferScale;
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
float4 col;
int tfid = (pos.x < 0);
if (TFMODE != TF_SINGLE_1D){
col = tex2DLayered(transferLayerPreintTex, sample, TFMODE==TF_LAYERED_2D ? sample : lastsample, tfid);
col.w *= density;
lastsample = sample;
}
else{
col = tex1D(transferTex, sample);
col.w *= 0;
}
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col*(1.0f - sum.w);
// exit early if opaque
if (sum.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum);
}
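// Colours are composited front to back with the "over" operator on premultiplied
// alpha (sum += col * (1 - sum.w)), which lets the marching loop terminate early
// once the accumulated opacity exceeds opacityThreshold.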
__global__ void
d_render_regular(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float transferWeight = 0.0f)
{
d_render<TF_SINGLE_1D>(d_output,imageW,imageH,density,brightness,transferOffset,transferScale,transferWeight);
}
__global__ void
d_render_preint(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float transferWeight = 0.0f)
{
d_render<TF_LAYERED_2D_PREINT>(d_output,imageW,imageH,density,brightness,transferOffset,transferScale,transferWeight);
}
__global__ void
d_render_preint_off(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float transferWeight = 0.0f)
{
d_render<TF_LAYERED_2D>(d_output,imageW,imageH,density,brightness,transferOffset,transferScale,transferWeight);
}
//////////////////////////////////////////////////////////////////////////
__global__ void
d_integrate_trapezoidal(cudaExtent extent){
uint x = blockIdx.x*blockDim.x + threadIdx.x;
// for higher speed could use hierarchical approach for sum
if (x >= extent.width){
return;
}
float stepsize = 1.0/float(extent.width-1);
float to = float(x) * stepsize;
float4 outclr = make_float4(0,0,0,0);
float incr = stepsize;
float4 lastval = tex1D(transferTex,0);
float cur = incr;
while (cur < to + incr * 0.5){
float4 val = tex1D(transferTex,cur);
float4 trapezoid = (lastval+val)/2.0f;
lastval = val;
outclr += trapezoid;
cur += incr;
}
surf1Dwrite(outclr,transferIntegrateSurf,x * sizeof(float4));
}
__global__ void
d_preintegrate(int layer, float steps, cudaExtent extent)
{
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= extent.width || y >= extent.height){
return;
}
float sx = float(x)/float(extent.width);
float sy = float(y)/float(extent.height);
float smax = max(sx,sy);
float smin = min(sx,sy);
float4 iv;
if (x != y){
// assumes square textures!
float fracc = smax - smin;
fracc = 1.0 /(fracc*steps);
float4 intmax = tex1D(transferIntegrateTex,smax);
float4 intmin = tex1D(transferIntegrateTex,smin);
iv.x = (intmax.x - intmin.x)*fracc;
iv.y = (intmax.y - intmin.y)*fracc;
iv.z = (intmax.z - intmin.z)*fracc;
//iv.w = (intmax.w - intmin.w)*fracc;
iv.w = (1.0 - exp( -(intmax.w - intmin.w) * fracc ));
}
else
{
float4 sample = tex1D(transferTex,smin);
iv.x = sample.x;
iv.y = sample.y;
iv.z = sample.z;
//iv.w = sample.w;
iv.w = (1.0 - exp(-sample.w));
}
iv.x = __saturatef(iv.x);
iv.y = __saturatef(iv.y);
iv.z = __saturatef(iv.z);
iv.w = __saturatef(iv.w);
surf2DLayeredwrite(iv,transferLayerPreintSurf, x * sizeof(float4), y, layer);
}
//////////////////////////////////////////////////////////////////////////
void VolumeRender_setTextureFilterMode(bool bLinearFilter)
{
volumeTex.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint;
}
void VolumeRender_setVolume(const Volume* vol)
{
cutilSafeCall (cudaBindTextureToArray(volumeTex, vol->content, vol->channelDesc));
}
static int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
void VolumeRender_updateTF(int tfIdx, int numColors, float4* colors)
{
if (d_transferFunc){
cutilSafeCall(cudaFreeArray(d_transferFunc));
d_transferFunc = 0;
}
cudaChannelFormatDesc channelFloat4 = cudaCreateChannelDesc<float4>();
cutilSafeCall(cudaMallocArray( &d_transferFunc, &channelFloat4, numColors, 1));
cutilSafeCall(cudaMemcpyToArray( d_transferFunc, 0, 0, colors, sizeof(float4)*numColors, cudaMemcpyHostToDevice));
// Bind the array to the texture
cutilSafeCall( cudaBindTextureToArray( transferTex, d_transferFunc, channelFloat4));
if (tfIdx < 0 || tfIdx >= VOLUMERENDER_TFS){
return;
}
{
cudaExtent extent = {VOLUMERENDER_TF_PREINTSTEPS, 0,0};
dim3 blockSize(32,1,1);
dim3 gridSize(iDivUp(extent.width,blockSize.x),1,1);
d_integrate_trapezoidal<<<gridSize, blockSize>>>( extent );
}
{
cudaExtent extent = {VOLUMERENDER_TF_PREINTSIZE, VOLUMERENDER_TF_PREINTSIZE,VOLUMERENDER_TFS};
dim3 blockSize(16,16,1);
dim3 gridSize(iDivUp(extent.width,blockSize.x),iDivUp(extent.height,blockSize.y),1);
d_preintegrate<<<gridSize, blockSize>>>( tfIdx, float(VOLUMERENDER_TF_PREINTSTEPS), extent );
}
}
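// Updating a transfer function re-runs both tables: the 1D running integral of the
// freshly uploaded colours, then the 2D pre-integration layer for the given tfIdx,
// so the lookup textures always match the data just bound to transferTex.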
void VolumeRender_init()
{
#ifdef VOLUMERENDER_RANDSIZE
// random ray offsets to fight aliasing
srand(128123);
uchar* randoms = new uchar[VOLUMERENDER_RANDSIZE*VOLUMERENDER_RANDSIZE];
for (int i = 0; i < VOLUMERENDER_RANDSIZE*VOLUMERENDER_RANDSIZE; i++){
randoms[i] = rand();
}
cudaChannelFormatDesc channelUchar = cudaCreateChannelDesc<uchar>();
cutilSafeCall(cudaMallocArray( &d_rayArray, &channelUchar, VOLUMERENDER_RANDSIZE, VOLUMERENDER_RANDSIZE));
cutilSafeCall(cudaMemcpyToArray( d_rayArray, 0, 0, randoms, sizeof(uchar)*VOLUMERENDER_RANDSIZE*VOLUMERENDER_RANDSIZE, cudaMemcpyHostToDevice));
delete [] randoms;
rayTex.normalized = 1;
rayTex.filterMode = cudaFilterModeLinear; // linear interpolation
rayTex.addressMode[0] = cudaAddressModeWrap;
rayTex.addressMode[1] = cudaAddressModeWrap;
cutilSafeCall( cudaBindTextureToArray( rayTex, d_rayArray, channelUchar));
#endif
// set texture parameters
volumeTex.normalized = true; // access with normalized texture coordinates
volumeTex.filterMode = cudaFilterModeLinear; // linear interpolation
volumeTex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
volumeTex.addressMode[1] = cudaAddressModeClamp;
volumeTex.addressMode[2] = cudaAddressModeClamp;
transferTex.normalized = true;
transferTex.filterMode = cudaFilterModeLinear;
transferTex.addressMode[0] = cudaAddressModeClamp;
transferLayerPreintTex.normalized = true;
transferLayerPreintTex.filterMode = cudaFilterModeLinear;
transferLayerPreintTex.addressMode[0] = cudaAddressModeClamp;
transferLayerPreintTex.addressMode[1] = cudaAddressModeClamp;
cudaChannelFormatDesc channelFloat4 = cudaCreateChannelDesc<float4>();
cudaExtent extent = {VOLUMERENDER_TF_PREINTSIZE, VOLUMERENDER_TF_PREINTSIZE,VOLUMERENDER_TFS};
cutilSafeCall( cudaMalloc3DArray( &d_transferArray, &channelFloat4, extent, cudaArrayLayered | cudaArraySurfaceLoadStore));
cutilSafeCall( cudaBindTextureToArray( transferLayerPreintTex, d_transferArray, channelFloat4));
cutilSafeCall( cudaBindSurfaceToArray( transferLayerPreintSurf, d_transferArray, channelFloat4));
transferIntegrateTex.normalized = true;
transferIntegrateTex.filterMode = cudaFilterModeLinear;
transferIntegrateTex.addressMode[0] = cudaAddressModeClamp;
transferIntegrateTex.addressMode[1] = cudaAddressModeClamp;
transferIntegrateTex.addressMode[2] = cudaAddressModeClamp;
cutilSafeCall( cudaMallocArray( &d_transferIntegrate, &channelFloat4, VOLUMERENDER_TF_PREINTSTEPS,0,cudaArraySurfaceLoadStore));
cutilSafeCall( cudaBindTextureToArray( transferIntegrateTex, d_transferIntegrate, channelFloat4));
cutilSafeCall( cudaBindSurfaceToArray( transferIntegrateSurf, d_transferIntegrate, channelFloat4));
// create transfer function texture
float4 transferFunc0[] = {
{ 0.0, 0.0, 0.0, 0.0, },
{ 1.0, 0.0, 0.0, 1.0, },
{ 1.0, 0.5, 0.0, 1.0, },
{ 1.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 0.0, 1.0, },
{ 0.0, 1.0, 1.0, 1.0, },
{ 0.0, 0.0, 1.0, 1.0, },
{ 1.0, 0.0, 1.0, 1.0, },
{ 0.0, 0.0, 0.0, 0.0, },
};
float4 transferFunc1[] = {
{ 0.0, 0.0, 0.0, 0.0, },
{ 0.0, 1.0, 0.0, 0.125, },
{ 0.0, 0.5, 1.0, 0.125, },
{ 0.0, 1.0, 1.0, 0.125, },
{ 0.0, 1.0, 0.0, 0.125, },
{ 0.25, 0.75, 0.0, 1.0, },
{ 0.75, 0.25, 0.0, 0.125, },
{ 1.0, 0.75, 0.0, 0.125, },
{ 0.0, 0.0, 0.0, 0.0, },
};
VolumeRender_updateTF(1,sizeof(transferFunc1)/sizeof(float4),transferFunc1);
VolumeRender_updateTF(0,sizeof(transferFunc0)/sizeof(float4),transferFunc0);
}
void VolumeRender_deinit()
{
cutilSafeCall(cudaFreeArray(d_transferFunc));
cutilSafeCall(cudaFreeArray(d_transferArray));
cutilSafeCall(cudaFreeArray(d_transferIntegrate));
d_transferArray = 0;
d_transferFunc = 0;
d_transferIntegrate = 0;
#ifdef VOLUMERENDER_RANDSIZE
cutilSafeCall(cudaFreeArray(d_rayArray));
d_rayArray = 0;
#endif
}
void VolumeRender_setPreIntegrated(int state){
usePreInt = !!state;
}
void VolumeRender_render(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH,
float density, float brightness, float transferOffset, float transferScale)
{
if (usePreInt){
d_render_preint<<<gridSize, blockSize>>>( d_output, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
else{
d_render_preint_off<<<gridSize, blockSize>>>( d_output, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
}
void VolumeRender_copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
cutilSafeCall( cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) );
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
|
40e9bf1ef7aaecacc93c4e4eced61ff0a71e61b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/clip_op.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T cuda_min(T x, T y);
template <typename T>
__device__ T cuda_max(T x, T y);
template <>
__device__ float cuda_min(float x, float y) { return fminf(x, y); }
template <>
__device__ float cuda_max(float x, float y) { return fmaxf(x, y); }
// Disabled since we don't use it right now.
/*
template <>
__device__ double cuda_min(double x, double y) { return fmin(x, y); }
template <>
__device__ double cuda_max(double x, double y) { return fmax(x, y); }
*/
template <typename T>
__global__ void ClipKernel(const int N, const T minval, const T maxval,
const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = cuda_min<T>(cuda_max<T>(X[i], minval), maxval);
}
}
template <typename T>
__global__ void ClipGradientKernel(const int N, const T minval,
const T maxval, const T* Y,
const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);
}
}
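// The gradient is passed through only where the clipped output lies strictly inside
// (min, max); elements saturated at either bound receive a zero gradient, matching
// the subgradient of the clip function.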
} // namespace
template <>
bool ClipOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
DCHECK_GT(X.size(), 0);
Y->ResizeLike(X);
hipLaunchKernelGGL(( ClipKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
X.size(), min_, max_, X.data<float>(), Y->mutable_data<float>());
return true;
}
template <>
bool ClipGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_GT(Y.size(), 0);
DCHECK_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
hipLaunchKernelGGL(( ClipGradientKernel), dim3(CAFFE_GET_BLOCKS(Y.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
Y.size(), min_, max_, Y.data<float>(), dY.data<float>(),
dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Clip, ClipOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ClipGradient, ClipGradientOp<float, CUDAContext>);
} // namespace caffe2
| 40e9bf1ef7aaecacc93c4e4eced61ff0a71e61b7.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/clip_op.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T cuda_min(T x, T y);
template <typename T>
__device__ T cuda_max(T x, T y);
template <>
__device__ float cuda_min(float x, float y) { return fminf(x, y); }
template <>
__device__ float cuda_max(float x, float y) { return fmaxf(x, y); }
// Disabled since we don't use it right now.
/*
template <>
__device__ double cuda_min(double x, double y) { return fmin(x, y); }
template <>
__device__ double cuda_max(double x, double y) { return fmax(x, y); }
*/
template <typename T>
__global__ void ClipKernel(const int N, const T minval, const T maxval,
const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = cuda_min<T>(cuda_max<T>(X[i], minval), maxval);
}
}
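// CUDA_1D_KERNEL_LOOP is Caffe2's grid-stride loop, so each thread may handle several
// elements and CAFFE_GET_BLOCKS only has to cap the number of blocks launched.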
template <typename T>
__global__ void ClipGradientKernel(const int N, const T minval,
const T maxval, const T* Y,
const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = dY[i] * (Y[i] > minval && Y[i] < maxval);
}
}
} // namespace
template <>
bool ClipOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
DCHECK_GT(X.size(), 0);
Y->ResizeLike(X);
ClipKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
X.size(), min_, max_, X.data<float>(), Y->mutable_data<float>());
return true;
}
template <>
bool ClipGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
DCHECK_GT(Y.size(), 0);
DCHECK_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
ClipGradientKernel<<<CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
Y.size(), min_, max_, Y.data<float>(), dY.data<float>(),
dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Clip, ClipOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(ClipGradient, ClipGradientOp<float, CUDAContext>);
} // namespace caffe2
|
2096bd680b2da8fe2b5229ffa2cd3dd19f975be1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDA.hpp"
/* Convert a spherical direction (theta, phi) into a normalized xyz vector. */
__forceinline__ __host__ __device__ vec3f sphere_to_normal(const vec3f &sphere_direction)
{
const float &theta = sphere_direction.y;
const float &phi = sphere_direction.z;
return vec3f(cosf(theta) * sinf(phi),
sinf(theta) * sinf(phi),
cosf(phi));
}
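// Convention used here: theta is the azimuth and phi the polar angle measured from +z,
// i.e. (x, y, z) = (cos(theta) sin(phi), sin(theta) sin(phi), cos(phi)); the
// commented-out normal_to_sphere() below is its inverse.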
/* change xyz vector into spherical vector with 0 length */
// __forceinline__ __host__ __device__ vec3f normal_to_sphere(const vec3f &xyz_direction)
// {
// float theta = atan(xyz_direction.y / xyz_direction.x);
// float phi = acos(xyz_direction.z / length(xyz_direction));
// if (xyz_direction.x < 0)
// {
// theta = (xyz_direction.y > 0) ? theta + M_PI : theta - M_PI;
// }
// return vec3f(0.f, theta, phi);
// }
/* change screen space position into ray direction */
__forceinline__ __host__ __device__ vec3f screen_to_direction(const vec2f &screen,
const vec3f &direction,
const vec3f &horizontal,
const vec3f &vertical)
{
return normalize(direction + (screen.x - 0.5f) * horizontal + (screen.y - 0.5f) * vertical);
}
// calculate edge strength, used in classic renderer
__forceinline__ __host__ __device__ float get_edge_strength(const int &M, const int &i)
{
return 1.f - 2.f * fabsf((float)i - (float)M / 2.f) / (float)M;
}
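// With M stencil rays of which i disagree with the centre ray, this is the triangular
// response 1 - 2|i - M/2| / M: it peaks at 1 when exactly half the stencil disagrees
// (the ray straddles an edge) and falls to 0 when none or all of the samples disagree.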
//------------------------------------------------------------------------------
// ray gen program - the actual rendering happens in here
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__fastRenderer()
{
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const int &numPixelSamples = optixLaunchParams.parameters.NUM_PIXEL_SAMPLES;
const int &accumID = optixLaunchParams.frame.accumID;
const auto &camera = optixLaunchParams.camera;
PRD prd;
prd.random.init(ix + accumID * optixLaunchParams.frame.size.x,
iy + accumID * optixLaunchParams.frame.size.y);
prd.pixelColor = 1.f;
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer(&prd, u0, u1);
vec3f pixelColor = 0.f;
for (int sampleID = 0; sampleID < numPixelSamples; sampleID++)
{
vec3f rayDir;
if (camera.camera_type == PINHOLE)
{
// normalized screen plane position, in [0,1]^2
vec2f screen;
if (numPixelSamples > 1)
{
screen = (vec2f(ix + prd.random(), iy + prd.random()) / vec2f(optixLaunchParams.frame.size));
}
else
{
screen = (vec2f(ix + 0.5f, iy + 0.5f) / vec2f(optixLaunchParams.frame.size));
}
// generate ray direction
rayDir = screen_to_direction(screen, camera.direction, camera.horizontal, camera.vertical);
}
else if (camera.camera_type == ENV)
{
// spherical coordinate position
vec3f spherical_position;
if (numPixelSamples > 1)
{
spherical_position = ((ix + prd.random()) * camera.horizontal + (iy + prd.random()) * camera.vertical);
}
else
{
spherical_position = ((ix + 0.5f) * camera.horizontal + (iy + 0.5f) * camera.vertical);
}
spherical_position -= vec3f(0.f, M_PI, 0.f);
// change into xyz coordinate position
const vec3f xyz_position(sphere_to_normal(spherical_position));
// view port transform
rayDir = {dot(camera.matrix.vx, xyz_position),
dot(camera.matrix.vy, xyz_position),
dot(camera.matrix.vz, xyz_position)};
}
const int &ray_type = optixLaunchParams.parameters.LAUNCH_RAY_TYPE;
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
ray_type, // SBT offset
RAY_TYPE_COUNT, // SBT stride
ray_type, // missSBTIndex
u0, u1);
pixelColor += prd.pixelColor;
}
const int r = int(255.99f * min(pixelColor.x / numPixelSamples, 1.f));
const int g = int(255.99f * min(pixelColor.y / numPixelSamples, 1.f));
const int b = int(255.99f * min(pixelColor.z / numPixelSamples, 1.f));
// convert to 32-bit rgba value (we explicitly set alpha to 0xff
// to make stb_image_write happy ...
const uint32_t rgba = 0xff000000 | (r << 0) | (g << 8) | (b << 16);
// and write to frame buffer ...
const uint32_t fbIndex = ix + iy * optixLaunchParams.frame.size.x;
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
extern "C" __global__ void __raygen__classicRenderer()
{
const auto &camera = optixLaunchParams.camera;
const int &stencil_length = optixLaunchParams.classic.stencil_length;
const int *const normal_index = optixLaunchParams.classic.stencil_normal_index;
const vec2f *const ray_stencil = optixLaunchParams.classic.ray_stencil;
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
// normalized screen plane position, in [0,1]^2
vec2f screen = (vec2f(ix + 0.5f, iy + 0.5f) / vec2f(optixLaunchParams.frame.size));
// main ray direction
vec3f main_rayDir = screen_to_direction(screen, camera.direction, camera.horizontal, camera.vertical);
// values for calculating edge strength
bool main_is_hit{false};
bool hit_same_object{true};
uint32_t main_geometryID;
int num_missed{0};
int num_different{0};
float main_hitT;
int num_farther{0};
// tracing center ray
{
PRD_Classic prd_main;
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer(&prd_main, u0, u1);
optixTrace(optixLaunchParams.traversable,
camera.position,
main_rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
CLASSIC_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
CLASSIC_RAY_TYPE, // missSBTIndex
u0, u1);
if (prd_main.is_hit)
{
main_is_hit = true;
main_geometryID = prd_main.geometryID;
}
else
{
hit_same_object = false;
}
main_hitT = prd_main.hitT;
}
vec3f normals[4];
{
int current_index{0};
// tracing ray_stencil
for (int i = 0; i < stencil_length; i++)
{
vec3f sub_rayDir = screen_to_direction(screen + ray_stencil[i], camera.direction, camera.horizontal, camera.vertical);
PRD_Classic prd_sub;
if (i == normal_index[current_index])
{
prd_sub.need_normal = true;
}
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer(&prd_sub, u0, u1);
optixTrace(optixLaunchParams.traversable,
camera.position,
sub_rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
CLASSIC_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
CLASSIC_RAY_TYPE, // missSBTIndex
u0, u1);
// silhouette edge or intersection line
if (prd_sub.is_hit)
{
if (prd_sub.geometryID != main_geometryID)
{
num_different++;
hit_same_object = false;
}
}
else
{
num_missed++;
hit_same_object = false;
}
// crease edge
if (prd_sub.need_normal)
{
normals[current_index] = prd_sub.normal;
if (current_index < 3)
current_index++;
}
// self-occluding silhouette
if (fabsf(prd_sub.hitT - main_hitT) > optixLaunchParams.classic.DISTANCE_CHANGE_THRESHOLD * main_hitT)
num_farther++;
}
}
// calculate edge
vec3f pixelColor = {255.f};
float edge_strength{0.f};
if (hit_same_object)
{
const float n_threshold = cosf(optixLaunchParams.classic.NORMAL_CHANGE_THRESHOLD);
float normal_change[2]{dot(normals[0], normals[2]), dot(normals[1], normals[3])}; // normal change between 0,2 and 1,3
// crease edge
if (normal_change[0] < n_threshold || normal_change[1] < n_threshold)
edge_strength = 1.f;
// self-occluding silhouette
else
edge_strength = get_edge_strength(stencil_length, num_farther);
}
// silhouette edge or intersection line
else
{
edge_strength = main_is_hit ? get_edge_strength(stencil_length, num_different + num_missed)
: get_edge_strength(stencil_length, num_missed);
}
pixelColor *= 1 - edge_strength;
const int r = int(255.99f * min(pixelColor.x, 1.f));
const int g = int(255.99f * min(pixelColor.y, 1.f));
const int b = int(255.99f * min(pixelColor.z, 1.f));
// convert to 32-bit rgba value (we explicitly set alpha to 0xff
// to make stb_image_write happy ...
const uint32_t rgba = 0xff000000 | (r << 0) | (g << 8) | (b << 16);
// and write to frame buffer ...
const uint32_t fbIndex = ix + iy * optixLaunchParams.frame.size.x;
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
extern "C" __global__ void __raygen__mixedRenderer()
{
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const int &numPixelSamples = optixLaunchParams.parameters.NUM_PIXEL_SAMPLES;
const int &accumID = optixLaunchParams.frame.accumID;
const auto &camera = optixLaunchParams.camera;
PRD prd;
prd.random.init(ix + accumID * optixLaunchParams.frame.size.x,
iy + accumID * optixLaunchParams.frame.size.y);
prd.pixelColor = 1.f;
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer(&prd, u0, u1);
vec3f pixelColor = 0.f;
for (int sampleID = 0; sampleID < numPixelSamples; sampleID++)
{
vec3f rayDir;
if (camera.camera_type == PINHOLE)
{
// normalized screen plane position, in [0,1]^2
vec2f screen;
if (numPixelSamples > 1)
{
screen = (vec2f(ix + prd.random(), iy + prd.random()) / vec2f(optixLaunchParams.frame.size));
}
else
{
screen = (vec2f(ix + 0.5f, iy + 0.5f) / vec2f(optixLaunchParams.frame.size));
}
// generate ray direction
rayDir = screen_to_direction(screen, camera.direction, camera.horizontal, camera.vertical);
}
else if (camera.camera_type == ENV)
{
// spherical coordinate position
vec3f spherical_position;
if (numPixelSamples > 1)
{
spherical_position = ((ix + prd.random()) * camera.horizontal + (iy + prd.random()) * camera.vertical);
}
else
{
spherical_position = ((ix + 0.5f) * camera.horizontal + (iy + 0.5f) * camera.vertical);
}
spherical_position -= vec3f(0.f, M_PI, 0.f);
// change into xyz coordinate position
const vec3f xyz_position(sphere_to_normal(spherical_position));
// view port transform
rayDir = {dot(camera.matrix.vx, xyz_position),
dot(camera.matrix.vy, xyz_position),
dot(camera.matrix.vz, xyz_position)};
}
// radiance rendering
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
RADIANCE_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RADIANCE_RAY_TYPE, // missSBTIndex
u0, u1);
pixelColor += prd.pixelColor;
// edge rendering
prd.pixelColor = 1.0f;
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
MONO_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
MONO_RAY_TYPE, // missSBTIndex
u0, u1);
pixelColor *= prd.pixelColor;
}
const int r = int(255.99f * min(pixelColor.x / numPixelSamples, 1.f));
const int g = int(255.99f * min(pixelColor.y / numPixelSamples, 1.f));
const int b = int(255.99f * min(pixelColor.z / numPixelSamples, 1.f));
// convert to 32-bit rgba value (we explicitly set alpha to 0xff
// to make stb_image_write happy ...
const uint32_t rgba = 0xff000000 | (r << 0) | (g << 8) | (b << 16);
// and write to frame buffer ...
const uint32_t fbIndex = ix + iy * optixLaunchParams.frame.size.x;
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
} | 2096bd680b2da8fe2b5229ffa2cd3dd19f975be1.cu | #include "CUDA.hpp"
/* Convert a spherical direction (theta, phi) into a normalized xyz vector. */
__forceinline__ __host__ __device__ vec3f sphere_to_normal(const vec3f &sphere_direction)
{
const float &theta = sphere_direction.y;
const float &phi = sphere_direction.z;
return vec3f(cosf(theta) * sinf(phi),
sinf(theta) * sinf(phi),
cosf(phi));
}
/* change xyz vector into spherical vector with 0 length */
// __forceinline__ __host__ __device__ vec3f normal_to_sphere(const vec3f &xyz_direction)
// {
// float theta = atan(xyz_direction.y / xyz_direction.x);
// float phi = acos(xyz_direction.z / length(xyz_direction));
// if (xyz_direction.x < 0)
// {
// theta = (xyz_direction.y > 0) ? theta + M_PI : theta - M_PI;
// }
// return vec3f(0.f, theta, phi);
// }
/* change screen space position into ray direction */
__forceinline__ __host__ __device__ vec3f screen_to_direction(const vec2f &screen,
const vec3f &direction,
const vec3f &horizontal,
const vec3f &vertical)
{
return normalize(direction + (screen.x - 0.5f) * horizontal + (screen.y - 0.5f) * vertical);
}
// calculate edge strength, used in classic renderer
__forceinline__ __host__ __device__ float get_edge_strength(const int &M, const int &i)
{
return 1.f - 2.f * fabsf((float)i - (float)M / 2.f) / (float)M;
}
//------------------------------------------------------------------------------
// ray gen program - the actual rendering happens in here
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__fastRenderer()
{
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const int &numPixelSamples = optixLaunchParams.parameters.NUM_PIXEL_SAMPLES;
const int &accumID = optixLaunchParams.frame.accumID;
const auto &camera = optixLaunchParams.camera;
PRD prd;
prd.random.init(ix + accumID * optixLaunchParams.frame.size.x,
iy + accumID * optixLaunchParams.frame.size.y);
prd.pixelColor = 1.f;
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer(&prd, u0, u1);
vec3f pixelColor = 0.f;
for (int sampleID = 0; sampleID < numPixelSamples; sampleID++)
{
vec3f rayDir;
if (camera.camera_type == PINHOLE)
{
// normalized screen plane position, in [0,1]^2
vec2f screen;
if (numPixelSamples > 1)
{
screen = (vec2f(ix + prd.random(), iy + prd.random()) / vec2f(optixLaunchParams.frame.size));
}
else
{
screen = (vec2f(ix + 0.5f, iy + 0.5f) / vec2f(optixLaunchParams.frame.size));
}
// generate ray direction
rayDir = screen_to_direction(screen, camera.direction, camera.horizontal, camera.vertical);
}
else if (camera.camera_type == ENV)
{
// spherical coordinate position
vec3f spherical_position;
if (numPixelSamples > 1)
{
spherical_position = ((ix + prd.random()) * camera.horizontal + (iy + prd.random()) * camera.vertical);
}
else
{
spherical_position = ((ix + 0.5f) * camera.horizontal + (iy + 0.5f) * camera.vertical);
}
spherical_position -= vec3f(0.f, M_PI, 0.f);
// change into xyz coordinate position
const vec3f xyz_position(sphere_to_normal(spherical_position));
// view port transform
rayDir = {dot(camera.matrix.vx, xyz_position),
dot(camera.matrix.vy, xyz_position),
dot(camera.matrix.vz, xyz_position)};
}
const int &ray_type = optixLaunchParams.parameters.LAUNCH_RAY_TYPE;
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
ray_type, // SBT offset
RAY_TYPE_COUNT, // SBT stride
ray_type, // missSBTIndex
u0, u1);
pixelColor += prd.pixelColor;
}
const int r = int(255.99f * min(pixelColor.x / numPixelSamples, 1.f));
const int g = int(255.99f * min(pixelColor.y / numPixelSamples, 1.f));
const int b = int(255.99f * min(pixelColor.z / numPixelSamples, 1.f));
// convert to 32-bit rgba value (we explicitly set alpha to 0xff
// to make stb_image_write happy ...
const uint32_t rgba = 0xff000000 | (r << 0) | (g << 8) | (b << 16);
// and write to frame buffer ...
const uint32_t fbIndex = ix + iy * optixLaunchParams.frame.size.x;
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
extern "C" __global__ void __raygen__classicRenderer()
{
const auto &camera = optixLaunchParams.camera;
const int &stencil_length = optixLaunchParams.classic.stencil_length;
const int *const normal_index = optixLaunchParams.classic.stencil_normal_index;
const vec2f *const ray_stencil = optixLaunchParams.classic.ray_stencil;
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
// normalized screen plane position, in [0,1]^2
vec2f screen = (vec2f(ix + 0.5f, iy + 0.5f) / vec2f(optixLaunchParams.frame.size));
// main ray direction
vec3f main_rayDir = screen_to_direction(screen, camera.direction, camera.horizontal, camera.vertical);
// values for calculating edge strength
bool main_is_hit{false};
bool hit_same_object{true};
uint32_t main_geometryID;
int num_missed{0};
int num_different{0};
float main_hitT;
int num_farther{0};
// tracing center ray
{
PRD_Classic prd_main;
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer(&prd_main, u0, u1);
optixTrace(optixLaunchParams.traversable,
camera.position,
main_rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
CLASSIC_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
CLASSIC_RAY_TYPE, // missSBTIndex
u0, u1);
if (prd_main.is_hit)
{
main_is_hit = true;
main_geometryID = prd_main.geometryID;
}
else
{
hit_same_object = false;
}
main_hitT = prd_main.hitT;
}
vec3f normals[4];
{
int current_index{0};
// tracing ray_stencil
for (int i = 0; i < stencil_length; i++)
{
vec3f sub_rayDir = screen_to_direction(screen + ray_stencil[i], camera.direction, camera.horizontal, camera.vertical);
PRD_Classic prd_sub;
if (i == normal_index[current_index])
{
prd_sub.need_normal = true;
}
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer(&prd_sub, u0, u1);
optixTrace(optixLaunchParams.traversable,
camera.position,
sub_rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
CLASSIC_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
CLASSIC_RAY_TYPE, // missSBTIndex
u0, u1);
// silhouette edge or intersection line
if (prd_sub.is_hit)
{
if (prd_sub.geometryID != main_geometryID)
{
num_different++;
hit_same_object = false;
}
}
else
{
num_missed++;
hit_same_object = false;
}
// crease edge
if (prd_sub.need_normal)
{
normals[current_index] = prd_sub.normal;
if (current_index < 3)
current_index++;
}
// self-occluding silhouette
if (fabsf(prd_sub.hitT - main_hitT) > optixLaunchParams.classic.DISTANCE_CHANGE_THRESHOLD * main_hitT)
num_farther++;
}
}
// calculate edge
vec3f pixelColor = {255.f};
float edge_strength{0.f};
if (hit_same_object)
{
const float n_threshold = cosf(optixLaunchParams.classic.NORMAL_CHANGE_THRESHOLD);
float normal_change[2]{dot(normals[0], normals[2]), dot(normals[1], normals[3])}; // normal change between 0,2 and 1,3
// crease edge
if (normal_change[0] < n_threshold || normal_change[1] < n_threshold)
edge_strength = 1.f;
// self-occluding silhouette
else
edge_strength = get_edge_strength(stencil_length, num_farther);
}
// silhouette edge or intersection line
else
{
edge_strength = main_is_hit ? get_edge_strength(stencil_length, num_different + num_missed)
: get_edge_strength(stencil_length, num_missed);
}
pixelColor *= 1 - edge_strength;
const int r = int(255.99f * min(pixelColor.x, 1.f));
const int g = int(255.99f * min(pixelColor.y, 1.f));
const int b = int(255.99f * min(pixelColor.z, 1.f));
// convert to 32-bit rgba value (we explicitly set alpha to 0xff
// to make stb_image_write happy ...
const uint32_t rgba = 0xff000000 | (r << 0) | (g << 8) | (b << 16);
// and write to frame buffer ...
const uint32_t fbIndex = ix + iy * optixLaunchParams.frame.size.x;
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
}
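// Summary of the classic edge pass: one centre ray plus a stencil of offset rays is
// traced per pixel; silhouettes and intersection lines come from stencil rays that
// miss or hit a different geometryID, crease edges from the angle between the four
// sampled normals, and self-occluding silhouettes from large relative changes in hit
// distance. The resulting edge strength darkens an otherwise white pixel.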
extern "C" __global__ void __raygen__mixedRenderer()
{
// compute a test pattern based on pixel ID
const int ix = optixGetLaunchIndex().x;
const int iy = optixGetLaunchIndex().y;
const int &numPixelSamples = optixLaunchParams.parameters.NUM_PIXEL_SAMPLES;
const int &accumID = optixLaunchParams.frame.accumID;
const auto &camera = optixLaunchParams.camera;
PRD prd;
prd.random.init(ix + accumID * optixLaunchParams.frame.size.x,
iy + accumID * optixLaunchParams.frame.size.y);
prd.pixelColor = 1.f;
// the values we store the PRD pointer in:
uint32_t u0, u1;
packPointer(&prd, u0, u1);
vec3f pixelColor = 0.f;
for (int sampleID = 0; sampleID < numPixelSamples; sampleID++)
{
vec3f rayDir;
if (camera.camera_type == PINHOLE)
{
// normalized screen plane position, in [0,1]^2
vec2f screen;
if (numPixelSamples > 1)
{
screen = (vec2f(ix + prd.random(), iy + prd.random()) / vec2f(optixLaunchParams.frame.size));
}
else
{
screen = (vec2f(ix + 0.5f, iy + 0.5f) / vec2f(optixLaunchParams.frame.size));
}
// generate ray direction
rayDir = screen_to_direction(screen, camera.direction, camera.horizontal, camera.vertical);
}
else if (camera.camera_type == ENV)
{
// spherical coordinate position
vec3f spherical_position;
if (numPixelSamples > 1)
{
spherical_position = ((ix + prd.random()) * camera.horizontal + (iy + prd.random()) * camera.vertical);
}
else
{
spherical_position = ((ix + 0.5f) * camera.horizontal + (iy + 0.5f) * camera.vertical);
}
spherical_position -= vec3f(0.f, M_PI, 0.f);
// change into xyz coordinate position
const vec3f xyz_position(sphere_to_normal(spherical_position));
// view port transform
rayDir = {dot(camera.matrix.vx, xyz_position),
dot(camera.matrix.vy, xyz_position),
dot(camera.matrix.vz, xyz_position)};
}
// radiance rendering
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
RADIANCE_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RADIANCE_RAY_TYPE, // missSBTIndex
u0, u1);
pixelColor += prd.pixelColor;
// edge rendering
prd.pixelColor = 1.0f;
optixTrace(optixLaunchParams.traversable,
camera.position,
rayDir,
0.f, // tmin
1e20f, // tmax
0.0f, // rayTime
OptixVisibilityMask(255),
OPTIX_RAY_FLAG_DISABLE_ANYHIT, //OPTIX_RAY_FLAG_NONE,
MONO_RAY_TYPE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
MONO_RAY_TYPE, // missSBTIndex
u0, u1);
pixelColor *= prd.pixelColor;
}
const int r = int(255.99f * min(pixelColor.x / numPixelSamples, 1.f));
const int g = int(255.99f * min(pixelColor.y / numPixelSamples, 1.f));
const int b = int(255.99f * min(pixelColor.z / numPixelSamples, 1.f));
// convert to 32-bit rgba value (we explicitly set alpha to 0xff
// to make stb_image_write happy ...
const uint32_t rgba = 0xff000000 | (r << 0) | (g << 8) | (b << 16);
// and write to frame buffer ...
const uint32_t fbIndex = ix + iy * optixLaunchParams.frame.size.x;
optixLaunchParams.frame.colorBuffer[fbIndex] = rgba;
} |
a9040b254e02befe6d5c3cbeb20159d48b4248b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layers/hypercolumn_layer.hpp"
#include "caffe/util/gpu_util.cuh"
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <iterator>
#include <sstream>
#define numThreadsPerBlock_1d 256
#define numThreadsPerBlock 256
/*
static inline int updiv(int a, int b){
return (a+b-1)/b;
}
*/
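// updiv() is used in Forward_gpu() below; with the local definition commented out it
// is assumed to come from an included header (e.g. gpu_util.cuh). It is the usual
// ceiling division (a + b - 1) / b used to size the launch grid.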
namespace caffe {
template <typename Dtype>
inline __device__ Dtype bilinear_interp(const Dtype &v11, const Dtype &v12,
const Dtype &v21, const Dtype &v22,
Dtype dx, Dtype dy) {
typedef Dtype D;
return (v11 * (D(1)-dy) + v21 * dy) * (D(1)-dx)
+ (v12 * (D(1)-dy) + v22 * dy) * dx;
}
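// Standard bilinear interpolation: v11..v22 are the four neighbouring feature-map
// values and (dx, dy) the fractional offsets, giving weights (1-dx)(1-dy), dx(1-dy),
// (1-dx)dy and dx*dy for v11, v12, v21 and v22 respectively (they sum to 1).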
template <typename Dtype>
__global__ void hypercolumn_fwd_kernel(const Dtype* bot_pointer,
int offset_bot,
Dtype* top_pointer,
const int *rand_pointer,
int N_, int n_channels_, int bottom_nums,
int sw, int sh, int sch, int tw, int th,
Dtype padf, Dtype poolf) {
// get pixel location (x,y)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n = i/N_;
top_pointer += i*n_channels_;
bot_pointer += n*sw*sh*sch;
// begin compute
if (i<N_*bottom_nums) {
int x_pt = rand_pointer[i*2+0];
int y_pt = rand_pointer[i*2+1];
const Dtype tx = (x_pt-padf)/poolf;
const Dtype ty = (y_pt-padf)/poolf;
int tx1 = (int)tx;
int ty1 = (int)ty;
int tx2 = tx1+1;
int ty2 = ty1+1;
// check if they are within the size limit
tx1 = tx1<0 ? 0 : (tx1<sw ? tx1 : sw-1);
tx2 = tx2<0 ? 0 : (tx2<sw ? tx2 : sw-1);
ty1 = ty1<0 ? 0 : (ty1<sh ? ty1 : sh-1);
ty2 = ty2<0 ? 0 : (ty2<sh ? ty2 : sh-1);
Dtype dx = tx - tx1;
Dtype dy = ty - ty1;
int p11 = ty1 * sw + tx1;
int p12 = ty1 * sw + tx2;
int p21 = ty2 * sw + tx1;
int p22 = ty2 * sw + tx2;
// This is maybe a bit slower than single channel, but less CPU intensive?
for (int ch=0;ch<sch;ch++) {
top_pointer[0] = bilinear_interp(bot_pointer[p11], bot_pointer[p12],
bot_pointer[p21], bot_pointer[p22],
dx, dy);
top_pointer += 1;
bot_pointer += offset_bot;
}
}
}
template <typename Dtype>
void HypercolumnLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){
//Forward_cpu(bottom, top);
SetupRandPoints(bottom, top);
const int bottom_width = bottom[start_id_]->width();
const int bottom_height = bottom[start_id_]->height();
const int bottom_nums = bottom[start_id_]->num();
int sn_channels = bottom[end_id_+2]->channels();
// Update random point index on the gpu
int *rand_pointer = workspace.mutable_gpu_data();
CUDA_CHECK(hipMemcpy(rand_pointer, &rand_points_[0],
rand_points_.size()*sizeof(int),
hipMemcpyHostToDevice));
CHECK_LE(rand_points_.size(), workspace.shape()[0]*workspace.shape()[1]);
std::vector<const Dtype*> bottom_layers(n_hblobs_);
for (int b = 0; b < n_hblobs_; b++) {
bottom_layers[b] = bottom[b]->gpu_data();
}
// const Dtype* sn_data = bottom[end_id_+2]->gpu_data();
CHECK_EQ(N_*bottom_nums*2, rand_points_.size());
dim3 threadsPerBlock(numThreadsPerBlock_1d, 1);
dim3 numBlocks(updiv(N_*bottom_nums, threadsPerBlock.x), 1);
// Data
Dtype* top_pointer = top[0]->mutable_gpu_data();
int dst_ch = 0;
for (int b = 0; b < n_hblobs_; b++) {
const int cur_nCh = bottom[b]->channels();
const Dtype* bot_pointer = bottom_layers[b];
int offset_bot = width_[b] * height_[b];
// for(int c = 0; c < cur_nCh; c++){
hipLaunchKernelGGL(( hypercolumn_fwd_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
bot_pointer, offset_bot,
top_pointer + dst_ch,
rand_pointer,
N_, n_channels_, bottom_nums,
width_[b], height_[b], cur_nCh,
width_[0], height_[0],
padf_[b], (Dtype)poolf_[b]);
// }
dst_ch+=cur_nCh;
}
// Labels
top_pointer = top[1]->mutable_gpu_data();
dst_ch = 0;
for (int b = end_id_+2; b < end_id_+3; b++) {
const int cur_nCh = bottom[b]->channels();
const Dtype* bot_pointer = bottom[b]->mutable_gpu_data();
int offset_bot = width_[0] * height_[0];
// for(int c = 0; c < cur_nCh; c++){
hipLaunchKernelGGL(( hypercolumn_fwd_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, bot_pointer,
offset_bot,
top_pointer + dst_ch,
rand_pointer,
N_, cur_nCh, bottom_nums,
width_[0], height_[0], cur_nCh,
width_[0], height_[0],
(Dtype)0, (Dtype)1);
// }
dst_ch+=cur_nCh;
}
}
template <typename Dtype>
__global__ void hypercolumn_bwd_kernel_syncadd(Dtype* bot_pointer,
int offset_bot, const Dtype* top_pointer,
const int *rand_pointer,
int N_, int n_channels_, int bottom_nums,
int sw, int sh, int sch, int tw, int th,
Dtype padf, Dtype poolf) {
// get pixel location (x,y)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n = i/N_;
top_pointer += i*n_channels_;
bot_pointer += n*sw*sh*sch;
// begin compute
if (i<N_*bottom_nums) {
int x_pt = rand_pointer[i*2+0];
int y_pt = rand_pointer[i*2+1];
const Dtype tx = (x_pt-padf)/poolf;
const Dtype ty = (y_pt-padf)/poolf;
int tx1 = (int)tx;
int ty1 = (int)ty;
int tx2 = tx1+1;
int ty2 = ty1+1;
// check if they are within the size limit
tx1 = tx1<0 ? 0 : (tx1<sw ? tx1 : sw-1);
tx2 = tx2<0 ? 0 : (tx2<sw ? tx2 : sw-1);
ty1 = ty1<0 ? 0 : (ty1<sh ? ty1 : sh-1);
ty2 = ty2<0 ? 0 : (ty2<sh ? ty2 : sh-1);
Dtype dx = tx - tx1;
Dtype dy = ty - ty1;
int p11 = ty1 * sw + tx1;
int p12 = ty1 * sw + tx2;
int p21 = ty2 * sw + tx1;
int p22 = ty2 * sw + tx2;
for (int ch=0;ch<sch;ch++) {
const Dtype dv = top_pointer[0];
caffe_gpu_atomic_add(dv * ((Dtype)1.-dy) * ((Dtype)1.-dx), bot_pointer+p11);
caffe_gpu_atomic_add(dv * dy * ((Dtype)1.-dx), bot_pointer+p21);
caffe_gpu_atomic_add(dv * ((Dtype)1.-dy) * dx, bot_pointer+p12);
caffe_gpu_atomic_add(dv * dy * dx, bot_pointer+p22);
top_pointer += 1;
bot_pointer += offset_bot;
}
}
}
template <typename Dtype>
__global__ void hypercolumn_bwd_kernel_assign(Dtype* bot_pointer, const Dtype* top_pointer,
const int *rand_pointer,
int N_, int n_channels_, int bottom_nums,
int sw, int sh, int sch, int tw, int th,
Dtype padf, Dtype poolf) {
// get pixel location (x,y)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n = i/N_;
top_pointer += i*n_channels_;
bot_pointer += n*sw*sh*sch;
// begin compute
if (i<N_*bottom_nums) {
int tx1 = rand_pointer[i*2+0];
int ty1 = rand_pointer[i*2+1];
int p11 = ty1 * sw + tx1;
const Dtype dv = top_pointer[0];
bot_pointer[p11] = dv;
}
}
// save pixel_inds and values, sort_by_key, reduce_by_key version of above
template <typename Dtype>
__global__ void hypercolumn_bwd_kernel(Dtype* bot_pointer, const Dtype* top_pointer,
const int *rand_pointer,
int *pixel_inds, Dtype *pixel_vals,
int N_, int n_channels_, int bottom_nums,
int sw, int sh, int sch, int tw, int th,
Dtype padf, Dtype poolf) {
// get pixel location (x,y)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n = i/N_;
top_pointer += i*n_channels_;
// begin compute
if (i<N_*bottom_nums) {
int x_pt = rand_pointer[i*2+0];
int y_pt = rand_pointer[i*2+1];
const Dtype tx = (x_pt-padf)/poolf;
const Dtype ty = (y_pt-padf)/poolf;
int tx1 = (int)tx;
int ty1 = (int)ty;
int tx2 = tx1+1;
int ty2 = ty1+1;
// check if they are within the size limit
tx1 = tx1<0 ? 0 : (tx1<sw ? tx1 : sw-1);
tx2 = tx2<0 ? 0 : (tx2<sw ? tx2 : sw-1);
ty1 = ty1<0 ? 0 : (ty1<sh ? ty1 : sh-1);
ty2 = ty2<0 ? 0 : (ty2<sh ? ty2 : sh-1);
Dtype dx = tx - tx1;
Dtype dy = ty - ty1;
int p11 = ty1 * sw + tx1 + n*sw*sh*sch;
int p12 = ty1 * sw + tx2 + n*sw*sh*sch;
int p21 = ty2 * sw + tx1 + n*sw*sh*sch;
int p22 = ty2 * sw + tx2 + n*sw*sh*sch;
const Dtype dv = top_pointer[0];
// Save indices and values to accumulate
pixel_inds[i*4+0] = p11;
pixel_inds[i*4+1] = p12;
pixel_inds[i*4+2] = p21;
pixel_inds[i*4+3] = p22;
typedef Dtype D;
pixel_vals[i*4+0] = dv * (D(1)-dy) * (D(1)-dx);
pixel_vals[i*4+1] = dv * (D(1)-dy) * dx;
pixel_vals[i*4+2] = dv * dy * (D(1)-dx);
pixel_vals[i*4+3] = dv * dy * dx;
}
}
template <typename Dtype>
__global__ void write_result_kernel(const int *pixel_inds,
const Dtype *pixel_vals,
Dtype* bot_pointer,
int max_count){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<max_count) {
bot_pointer[pixel_inds[i]] = pixel_vals[i];
}
}
template <typename Dtype>
void HypercolumnLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
// TODO: Fix checking of propagate down to all indices
const int bottom_width = bottom[start_id_]->width();
const int bottom_height = bottom[start_id_]->height();
const int bottom_nums = bottom[start_id_]->num();
int sn_channels = bottom[end_id_+2]->channels();
std::vector<Dtype*> bottom_layers(n_hblobs_);
for (int b = 0; b < n_hblobs_; b++) {
if (!propagate_down[b]) continue;
bottom_layers[b] = bottom[b]->mutable_gpu_diff();
caffe_gpu_set(bottom[b]->count(), (Dtype)0, bottom_layers[b]);
}
const Dtype* sn_data = bottom[end_id_+2]->gpu_diff();
CHECK_EQ(N_*bottom_nums*2, rand_points_.size());
const int *rand_pointer = workspace.gpu_data();
// int *pixel_inds = workspace_inds.mutable_gpu_data();
// Dtype *pixel_vals = workspace_vals.mutable_gpu_data();
// caffe_gpu_set(N_*bottom_nums*4, (int)0, pixel_inds);
// caffe_gpu_set(N_*bottom_nums*4, (Dtype)0, pixel_vals);
dim3 threadsPerBlock(numThreadsPerBlock_1d, 1);
dim3 numBlocks(updiv(N_*bottom_nums, threadsPerBlock.x), 1);
dim3 threadsPerBlock4(numThreadsPerBlock_1d, 1);
dim3 numBlocks4(updiv(N_*bottom_nums*4, threadsPerBlock.x), 1);
const Dtype* top_pointer = top[0]->gpu_diff();
int dst_ch = 0;
for (int b = 0; b < n_hblobs_; b++) {
if (!propagate_down[b]) continue;
const int cur_nCh = bottom[b]->channels();
Dtype* bot_pointer = bottom_layers[b];
int offset_bot = width_[b] * height_[b];
// for(int c = 0; c < cur_nCh; c++) {
hipLaunchKernelGGL(( hypercolumn_bwd_kernel_syncadd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, bot_pointer,
offset_bot,
top_pointer + dst_ch,
rand_pointer,
N_, n_channels_, bottom_nums,
width_[b], height_[b], cur_nCh,
width_[0], height_[0],
padf_[b], (Dtype)poolf_[b]);
dst_ch += cur_nCh;
// dst_ch++;
// debug this. not working.
// hypercolumn_bwd_kernel<<<numBlocks, threadsPerBlock>>>(bot_pointer + c*offset_bot,
// top_pointer + dst_ch,
// rand_pointer,
// pixel_inds, pixel_vals,
// N_, n_channels_, bottom_nums,
// width_[b], height_[b], cur_nCh,
// width_[0], height_[0],
// padf_[b], (Dtype)poolf_[b]);
// dst_ch++;
// CUDA_CHECK(hipDeviceSynchronize());
// // TODO: finish implementing this
// thrust::device_ptr<int> t_pixel_inds = thrust::device_pointer_cast(pixel_inds);
// thrust::device_ptr<Dtype> t_pixel_vals = thrust::device_pointer_cast(pixel_vals);
//
// std::ostringstream output;
// output << "orig:\n";
// thrust::copy(t_pixel_inds, t_pixel_inds+1, std::ostream_iterator<int>(output, " "));
// output << "\n";
// thrust::copy(t_pixel_vals, t_pixel_vals+1, std::ostream_iterator<Dtype>(output, " "));
// output << "\n";
// LOG(ERROR) << output;
// output << "sorted:\n";
// try {
// thrust::sort_by_key(t_pixel_inds, t_pixel_inds + N_*bottom_nums*4, t_pixel_vals);
// } catch (thrust::system_error &e) {
// LOG(ERROR) << "Thrust Error: " << e.what();
// }
// thrust::copy(t_pixel_inds, t_pixel_inds+1, std::ostream_iterator<int>(output, " "));
// output << "\n";
// thrust::copy(t_pixel_vals, t_pixel_vals+1, std::ostream_iterator<Dtype>(output, " "));
// output << "\n";
// LOG(ERROR) << output;
// thrust::reduce_by_key(t_pixel_inds,
// t_pixel_inds + N_*bottom_nums*4,
// t_pixel_vals,
// t_pixel_inds + N_*bottom_nums*4,
// t_pixel_vals + N_*bottom_nums*4);
// CUDA_CHECK(hipDeviceSynchronize());
// write_result_kernel<<<numBlocks4, threadsPerBlock4>>>(pixel_inds + N_*bottom_nums*4,
// pixel_vals + N_*bottom_nums*4,
// bot_pointer + c*offset_bot,
// N_*bottom_nums*4);
// CUDA_CHECK(hipDeviceSynchronize());
// }
}
// output labels
top_pointer = top[1]->gpu_diff();
dst_ch = 0;
if (propagate_down[end_id_+2]) {
const int cur_nCh = bottom[end_id_+2]->channels();
Dtype* bot_pointer = bottom[end_id_+2]->mutable_gpu_diff();
int offset_bot = width_[0] * height_[0];
for(int c = 0; c < cur_nCh; c++){
hipLaunchKernelGGL(( hypercolumn_bwd_kernel_assign), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, bot_pointer + c*offset_bot,
top_pointer + dst_ch,
rand_pointer,
N_, cur_nCh, bottom_nums,
width_[0], height_[0], cur_nCh,
width_[0], height_[0],
(Dtype)0, (Dtype)1);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(HypercolumnLayer);
} // namespace caffe
| a9040b254e02befe6d5c3cbeb20159d48b4248b3.cu | #include "caffe/layers/hypercolumn_layer.hpp"
#include "caffe/util/gpu_util.cuh"
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <iterator>
#include <sstream>
#define numThreadsPerBlock_1d 256
#define numThreadsPerBlock 256
/*
static inline int updiv(int a, int b){
return (a+b-1)/b;
}
*/
namespace caffe {
template <typename Dtype>
inline __device__ Dtype bilinear_interp(const Dtype &v11, const Dtype &v12,
const Dtype &v21, const Dtype &v22,
Dtype dx, Dtype dy) {
typedef Dtype D;
return (v11 * (D(1)-dy) + v21 * dy) * (D(1)-dx)
+ (v12 * (D(1)-dy) + v22 * dy) * dx;
}
template <typename Dtype>
__global__ void hypercolumn_fwd_kernel(const Dtype* bot_pointer,
int offset_bot,
Dtype* top_pointer,
const int *rand_pointer,
int N_, int n_channels_, int bottom_nums,
int sw, int sh, int sch, int tw, int th,
Dtype padf, Dtype poolf) {
// get pixel location (x,y)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n = i/N_;
top_pointer += i*n_channels_;
bot_pointer += n*sw*sh*sch;
// begin compute
if (i<N_*bottom_nums) {
int x_pt = rand_pointer[i*2+0];
int y_pt = rand_pointer[i*2+1];
const Dtype tx = (x_pt-padf)/poolf;
const Dtype ty = (y_pt-padf)/poolf;
int tx1 = (int)tx;
int ty1 = (int)ty;
int tx2 = tx1+1;
int ty2 = ty1+1;
// check if they are within the size limit
tx1 = tx1<0 ? 0 : (tx1<sw ? tx1 : sw-1);
tx2 = tx2<0 ? 0 : (tx2<sw ? tx2 : sw-1);
ty1 = ty1<0 ? 0 : (ty1<sh ? ty1 : sh-1);
ty2 = ty2<0 ? 0 : (ty2<sh ? ty2 : sh-1);
Dtype dx = tx - tx1;
Dtype dy = ty - ty1;
int p11 = ty1 * sw + tx1;
int p12 = ty1 * sw + tx2;
int p21 = ty2 * sw + tx1;
int p22 = ty2 * sw + tx2;
// This is maybe a bit slower than single channel, but less CPU intensive?
for (int ch=0;ch<sch;ch++) {
top_pointer[0] = bilinear_interp(bot_pointer[p11], bot_pointer[p12],
bot_pointer[p21], bot_pointer[p22],
dx, dy);
top_pointer += 1;
bot_pointer += offset_bot;
}
}
}
template <typename Dtype>
void HypercolumnLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){
//Forward_cpu(bottom, top);
SetupRandPoints(bottom, top);
const int bottom_width = bottom[start_id_]->width();
const int bottom_height = bottom[start_id_]->height();
const int bottom_nums = bottom[start_id_]->num();
int sn_channels = bottom[end_id_+2]->channels();
// Update random point index on the gpu
int *rand_pointer = workspace.mutable_gpu_data();
CUDA_CHECK(cudaMemcpy(rand_pointer, &rand_points_[0],
rand_points_.size()*sizeof(int),
cudaMemcpyHostToDevice));
CHECK_LE(rand_points_.size(), workspace.shape()[0]*workspace.shape()[1]);
std::vector<const Dtype*> bottom_layers(n_hblobs_);
for (int b = 0; b < n_hblobs_; b++) {
bottom_layers[b] = bottom[b]->gpu_data();
}
// const Dtype* sn_data = bottom[end_id_+2]->gpu_data();
CHECK_EQ(N_*bottom_nums*2, rand_points_.size());
dim3 threadsPerBlock(numThreadsPerBlock_1d, 1);
dim3 numBlocks(updiv(N_*bottom_nums, threadsPerBlock.x), 1);
// Data
Dtype* top_pointer = top[0]->mutable_gpu_data();
int dst_ch = 0;
for (int b = 0; b < n_hblobs_; b++) {
const int cur_nCh = bottom[b]->channels();
const Dtype* bot_pointer = bottom_layers[b];
int offset_bot = width_[b] * height_[b];
// for(int c = 0; c < cur_nCh; c++){
hypercolumn_fwd_kernel<<<numBlocks, threadsPerBlock>>>(
bot_pointer, offset_bot,
top_pointer + dst_ch,
rand_pointer,
N_, n_channels_, bottom_nums,
width_[b], height_[b], cur_nCh,
width_[0], height_[0],
padf_[b], (Dtype)poolf_[b]);
// }
dst_ch+=cur_nCh;
}
// Labels
top_pointer = top[1]->mutable_gpu_data();
dst_ch = 0;
for (int b = end_id_+2; b < end_id_+3; b++) {
const int cur_nCh = bottom[b]->channels();
const Dtype* bot_pointer = bottom[b]->mutable_gpu_data();
int offset_bot = width_[0] * height_[0];
// for(int c = 0; c < cur_nCh; c++){
hypercolumn_fwd_kernel<<<numBlocks, threadsPerBlock>>>(bot_pointer,
offset_bot,
top_pointer + dst_ch,
rand_pointer,
N_, cur_nCh, bottom_nums,
width_[0], height_[0], cur_nCh,
width_[0], height_[0],
(Dtype)0, (Dtype)1);
// }
dst_ch+=cur_nCh;
}
}
template <typename Dtype>
__global__ void hypercolumn_bwd_kernel_syncadd(Dtype* bot_pointer,
int offset_bot, const Dtype* top_pointer,
const int *rand_pointer,
int N_, int n_channels_, int bottom_nums,
int sw, int sh, int sch, int tw, int th,
Dtype padf, Dtype poolf) {
// get pixel location (x,y)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n = i/N_;
top_pointer += i*n_channels_;
bot_pointer += n*sw*sh*sch;
// begin compute
if (i<N_*bottom_nums) {
int x_pt = rand_pointer[i*2+0];
int y_pt = rand_pointer[i*2+1];
const Dtype tx = (x_pt-padf)/poolf;
const Dtype ty = (y_pt-padf)/poolf;
int tx1 = (int)tx;
int ty1 = (int)ty;
int tx2 = tx1+1;
int ty2 = ty1+1;
// check if they are within the size limit
tx1 = tx1<0 ? 0 : (tx1<sw ? tx1 : sw-1);
tx2 = tx2<0 ? 0 : (tx2<sw ? tx2 : sw-1);
ty1 = ty1<0 ? 0 : (ty1<sh ? ty1 : sh-1);
ty2 = ty2<0 ? 0 : (ty2<sh ? ty2 : sh-1);
Dtype dx = tx - tx1;
Dtype dy = ty - ty1;
int p11 = ty1 * sw + tx1;
int p12 = ty1 * sw + tx2;
int p21 = ty2 * sw + tx1;
int p22 = ty2 * sw + tx2;
for (int ch=0;ch<sch;ch++) {
const Dtype dv = top_pointer[0];
caffe_gpu_atomic_add(dv * ((Dtype)1.-dy) * ((Dtype)1.-dx), bot_pointer+p11);
caffe_gpu_atomic_add(dv * dy * ((Dtype)1.-dx), bot_pointer+p21);
caffe_gpu_atomic_add(dv * ((Dtype)1.-dy) * dx, bot_pointer+p12);
caffe_gpu_atomic_add(dv * dy * dx, bot_pointer+p22);
top_pointer += 1;
bot_pointer += offset_bot;
}
}
}
template <typename Dtype>
__global__ void hypercolumn_bwd_kernel_assign(Dtype* bot_pointer, const Dtype* top_pointer,
const int *rand_pointer,
int N_, int n_channels_, int bottom_nums,
int sw, int sh, int sch, int tw, int th,
Dtype padf, Dtype poolf) {
// get pixel location (x,y)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n = i/N_;
top_pointer += i*n_channels_;
bot_pointer += n*sw*sh*sch;
// begin compute
if (i<N_*bottom_nums) {
int tx1 = rand_pointer[i*2+0];
int ty1 = rand_pointer[i*2+1];
int p11 = ty1 * sw + tx1;
const Dtype dv = top_pointer[0];
bot_pointer[p11] = dv;
}
}
// save pixel_inds and values, sort_by_key, reduce_by_key version of above
template <typename Dtype>
__global__ void hypercolumn_bwd_kernel(Dtype* bot_pointer, const Dtype* top_pointer,
const int *rand_pointer,
int *pixel_inds, Dtype *pixel_vals,
int N_, int n_channels_, int bottom_nums,
int sw, int sh, int sch, int tw, int th,
Dtype padf, Dtype poolf) {
// get pixel location (x,y)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n = i/N_;
top_pointer += i*n_channels_;
// begin compute
if (i<N_*bottom_nums) {
int x_pt = rand_pointer[i*2+0];
int y_pt = rand_pointer[i*2+1];
const Dtype tx = (x_pt-padf)/poolf;
const Dtype ty = (y_pt-padf)/poolf;
int tx1 = (int)tx;
int ty1 = (int)ty;
int tx2 = tx1+1;
int ty2 = ty1+1;
// check if they are within the size limit
tx1 = tx1<0 ? 0 : (tx1<sw ? tx1 : sw-1);
tx2 = tx2<0 ? 0 : (tx2<sw ? tx2 : sw-1);
ty1 = ty1<0 ? 0 : (ty1<sh ? ty1 : sh-1);
ty2 = ty2<0 ? 0 : (ty2<sh ? ty2 : sh-1);
Dtype dx = tx - tx1;
Dtype dy = ty - ty1;
int p11 = ty1 * sw + tx1 + n*sw*sh*sch;
int p12 = ty1 * sw + tx2 + n*sw*sh*sch;
int p21 = ty2 * sw + tx1 + n*sw*sh*sch;
int p22 = ty2 * sw + tx2 + n*sw*sh*sch;
const Dtype dv = top_pointer[0];
// Save indices and values to accumulate
pixel_inds[i*4+0] = p11;
pixel_inds[i*4+1] = p12;
pixel_inds[i*4+2] = p21;
pixel_inds[i*4+3] = p22;
typedef Dtype D;
pixel_vals[i*4+0] = dv * (D(1)-dy) * (D(1)-dx);
pixel_vals[i*4+1] = dv * (D(1)-dy) * dx;
pixel_vals[i*4+2] = dv * dy * (D(1)-dx);
pixel_vals[i*4+3] = dv * dy * dx;
}
}
template <typename Dtype>
__global__ void write_result_kernel(const int *pixel_inds,
const Dtype *pixel_vals,
Dtype* bot_pointer,
int max_count){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<max_count) {
bot_pointer[pixel_inds[i]] = pixel_vals[i];
}
}
template <typename Dtype>
void HypercolumnLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){
// TODO: Fix checking of propagate down to all indices
const int bottom_width = bottom[start_id_]->width();
const int bottom_height = bottom[start_id_]->height();
const int bottom_nums = bottom[start_id_]->num();
int sn_channels = bottom[end_id_+2]->channels();
std::vector<Dtype*> bottom_layers(n_hblobs_);
for (int b = 0; b < n_hblobs_; b++) {
if (!propagate_down[b]) continue;
bottom_layers[b] = bottom[b]->mutable_gpu_diff();
caffe_gpu_set(bottom[b]->count(), (Dtype)0, bottom_layers[b]);
}
const Dtype* sn_data = bottom[end_id_+2]->gpu_diff();
CHECK_EQ(N_*bottom_nums*2, rand_points_.size());
const int *rand_pointer = workspace.gpu_data();
// int *pixel_inds = workspace_inds.mutable_gpu_data();
// Dtype *pixel_vals = workspace_vals.mutable_gpu_data();
// caffe_gpu_set(N_*bottom_nums*4, (int)0, pixel_inds);
// caffe_gpu_set(N_*bottom_nums*4, (Dtype)0, pixel_vals);
dim3 threadsPerBlock(numThreadsPerBlock_1d, 1);
dim3 numBlocks(updiv(N_*bottom_nums, threadsPerBlock.x), 1);
dim3 threadsPerBlock4(numThreadsPerBlock_1d, 1);
dim3 numBlocks4(updiv(N_*bottom_nums*4, threadsPerBlock.x), 1);
const Dtype* top_pointer = top[0]->gpu_diff();
int dst_ch = 0;
for (int b = 0; b < n_hblobs_; b++) {
if (!propagate_down[b]) continue;
const int cur_nCh = bottom[b]->channels();
Dtype* bot_pointer = bottom_layers[b];
int offset_bot = width_[b] * height_[b];
// for(int c = 0; c < cur_nCh; c++) {
hypercolumn_bwd_kernel_syncadd<<<numBlocks, threadsPerBlock>>>(bot_pointer,
offset_bot,
top_pointer + dst_ch,
rand_pointer,
N_, n_channels_, bottom_nums,
width_[b], height_[b], cur_nCh,
width_[0], height_[0],
padf_[b], (Dtype)poolf_[b]);
dst_ch += cur_nCh;
// dst_ch++;
// debug this. not working.
// hypercolumn_bwd_kernel<<<numBlocks, threadsPerBlock>>>(bot_pointer + c*offset_bot,
// top_pointer + dst_ch,
// rand_pointer,
// pixel_inds, pixel_vals,
// N_, n_channels_, bottom_nums,
// width_[b], height_[b], cur_nCh,
// width_[0], height_[0],
// padf_[b], (Dtype)poolf_[b]);
// dst_ch++;
// CUDA_CHECK(cudaDeviceSynchronize());
// // TODO: finish implementing this
// thrust::device_ptr<int> t_pixel_inds = thrust::device_pointer_cast(pixel_inds);
// thrust::device_ptr<Dtype> t_pixel_vals = thrust::device_pointer_cast(pixel_vals);
//
// std::ostringstream output;
// output << "orig:\n";
// thrust::copy(t_pixel_inds, t_pixel_inds+1, std::ostream_iterator<int>(output, " "));
// output << "\n";
// thrust::copy(t_pixel_vals, t_pixel_vals+1, std::ostream_iterator<Dtype>(output, " "));
// output << "\n";
// LOG(ERROR) << output;
// output << "sorted:\n";
// try {
// thrust::sort_by_key(t_pixel_inds, t_pixel_inds + N_*bottom_nums*4, t_pixel_vals);
// } catch (thrust::system_error &e) {
// LOG(ERROR) << "Thrust Error: " << e.what();
// }
// thrust::copy(t_pixel_inds, t_pixel_inds+1, std::ostream_iterator<int>(output, " "));
// output << "\n";
// thrust::copy(t_pixel_vals, t_pixel_vals+1, std::ostream_iterator<Dtype>(output, " "));
// output << "\n";
// LOG(ERROR) << output;
// thrust::reduce_by_key(t_pixel_inds,
// t_pixel_inds + N_*bottom_nums*4,
// t_pixel_vals,
// t_pixel_inds + N_*bottom_nums*4,
// t_pixel_vals + N_*bottom_nums*4);
// CUDA_CHECK(cudaDeviceSynchronize());
// write_result_kernel<<<numBlocks4, threadsPerBlock4>>>(pixel_inds + N_*bottom_nums*4,
// pixel_vals + N_*bottom_nums*4,
// bot_pointer + c*offset_bot,
// N_*bottom_nums*4);
// CUDA_CHECK(cudaDeviceSynchronize());
// }
}
// output labels
top_pointer = top[1]->gpu_diff();
dst_ch = 0;
if (propagate_down[end_id_+2]) {
const int cur_nCh = bottom[end_id_+2]->channels();
Dtype* bot_pointer = bottom[end_id_+2]->mutable_gpu_diff();
int offset_bot = width_[0] * height_[0];
for(int c = 0; c < cur_nCh; c++){
hypercolumn_bwd_kernel_assign<<<numBlocks, threadsPerBlock>>>(bot_pointer + c*offset_bot,
top_pointer + dst_ch,
rand_pointer,
N_, cur_nCh, bottom_nums,
width_[0], height_[0], cur_nCh,
width_[0], height_[0],
(Dtype)0, (Dtype)1);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(HypercolumnLayer);
} // namespace caffe
|
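// --- Editor's sketch (not part of the original layer) ---
// hypercolumn_fwd_kernel above samples each bottom channel with bilinear
// weights (1-dx)(1-dy), dx(1-dy), (1-dx)dy and dx*dy after clamping the four
// neighbours to the blob border. A plain host-side reference for one sample,
// handy for unit-testing the kernel, could look like this (the function name
// is hypothetical):
static float bilinear_sample_ref(const float* plane, int sw, int sh,
                                 float tx, float ty) {
  int tx1 = (int)tx, ty1 = (int)ty, tx2 = tx1 + 1, ty2 = ty1 + 1;
  // clamp the four neighbours exactly as the kernel does
  tx1 = tx1 < 0 ? 0 : (tx1 < sw ? tx1 : sw - 1);
  tx2 = tx2 < 0 ? 0 : (tx2 < sw ? tx2 : sw - 1);
  ty1 = ty1 < 0 ? 0 : (ty1 < sh ? ty1 : sh - 1);
  ty2 = ty2 < 0 ? 0 : (ty2 < sh ? ty2 : sh - 1);
  const float dx = tx - tx1, dy = ty - ty1;
  const float v11 = plane[ty1 * sw + tx1], v12 = plane[ty1 * sw + tx2];
  const float v21 = plane[ty2 * sw + tx1], v22 = plane[ty2 * sw + tx2];
  return (v11 * (1.f - dy) + v21 * dy) * (1.f - dx)
       + (v12 * (1.f - dy) + v22 * dy) * dx;
}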
eb4cbaf3e691057012a56209b53ab6476775c099.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
// CUDA heterogeneous kernel ------------------------------------------------------------------------------------------
__global__ void RANSAC_kernel_block(float *model_param_local, flowvector *flowvectors,
int flowvector_count, int *random_numbers, int max_iter, int error_threshold, float convergence_threshold,
int *g_out_id, int *model_candidate, int *outliers_candidate, int *launch_gpu) {
extern __shared__ int l_mem[];
int* outlier_block_count = l_mem;
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int num_blocks = gridDim.x;
float vx_error, vy_error;
int outlier_local_count = 0;
// Each block performs one iteration
for(int loop_count = bx; loop_count < max_iter; loop_count += num_blocks) {
float *model_param =
&model_param_local
[4 *
loop_count]; // xc=model_param_sh[0], yc=model_param_sh[1], D=model_param_sh[2], R=model_param_sh[3]
// Wait until CPU computes F-o-F model
if(tx == 0) {
outlier_block_count[0] = 0;
while(atomicAdd_system(&launch_gpu[loop_count], 0) == 0) {
}
}
__syncthreads();
if(model_param[0] == -2011)
continue;
// Reset local outlier counter
outlier_local_count = 0;
// Compute number of outliers
for(int i = tx; i < flowvector_count; i += blockDim.x) {
flowvector fvreg = flowvectors[i]; // x, y, vx, vy
vx_error = fvreg.x + ((int)((fvreg.x - model_param[0]) * model_param[2]) -
(int)((fvreg.y - model_param[1]) * model_param[3])) -
fvreg.vx;
vy_error = fvreg.y + ((int)((fvreg.y - model_param[1]) * model_param[2]) +
(int)((fvreg.x - model_param[0]) * model_param[3])) -
fvreg.vy;
if((fabs(vx_error) >= error_threshold) || (fabs(vy_error) >= error_threshold)) {
outlier_local_count++;
}
}
atomicAdd(&outlier_block_count[0], outlier_local_count);
__syncthreads();
if(tx == 0) {
// Compare to threshold
if(outlier_block_count[0] < flowvector_count * convergence_threshold) {
int index = atomicAdd(g_out_id, 1);
model_candidate[index] = loop_count;
outliers_candidate[index] = outlier_block_count[0];
}
}
}
}
hipError_t call_RANSAC_kernel_block(int blocks, int threads, float *model_param_local, flowvector *flowvectors,
int flowvector_count, int *random_numbers, int max_iter, int error_threshold, float convergence_threshold,
int *g_out_id, int *model_candidate, int *outliers_candidate, int *launch_gpu, int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
hipLaunchKernelGGL(( RANSAC_kernel_block), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, model_param_local, flowvectors,
flowvector_count, random_numbers, max_iter, error_threshold, convergence_threshold,
g_out_id, model_candidate, outliers_candidate, launch_gpu);
hipError_t err = hipGetLastError();
return err;
}
| eb4cbaf3e691057012a56209b53ab6476775c099.cu | /*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
// CUDA heterogeneous kernel ------------------------------------------------------------------------------------------
__global__ void RANSAC_kernel_block(float *model_param_local, flowvector *flowvectors,
int flowvector_count, int *random_numbers, int max_iter, int error_threshold, float convergence_threshold,
int *g_out_id, int *model_candidate, int *outliers_candidate, int *launch_gpu) {
extern __shared__ int l_mem[];
int* outlier_block_count = l_mem;
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int num_blocks = gridDim.x;
float vx_error, vy_error;
int outlier_local_count = 0;
// Each block performs one iteration
for(int loop_count = bx; loop_count < max_iter; loop_count += num_blocks) {
float *model_param =
&model_param_local
[4 *
loop_count]; // xc=model_param_sh[0], yc=model_param_sh[1], D=model_param_sh[2], R=model_param_sh[3]
// Wait until CPU computes F-o-F model
if(tx == 0) {
outlier_block_count[0] = 0;
while(atomicAdd_system(&launch_gpu[loop_count], 0) == 0) {
}
}
__syncthreads();
if(model_param[0] == -2011)
continue;
// Reset local outlier counter
outlier_local_count = 0;
// Compute number of outliers
for(int i = tx; i < flowvector_count; i += blockDim.x) {
flowvector fvreg = flowvectors[i]; // x, y, vx, vy
vx_error = fvreg.x + ((int)((fvreg.x - model_param[0]) * model_param[2]) -
(int)((fvreg.y - model_param[1]) * model_param[3])) -
fvreg.vx;
vy_error = fvreg.y + ((int)((fvreg.y - model_param[1]) * model_param[2]) +
(int)((fvreg.x - model_param[0]) * model_param[3])) -
fvreg.vy;
if((fabs(vx_error) >= error_threshold) || (fabs(vy_error) >= error_threshold)) {
outlier_local_count++;
}
}
atomicAdd(&outlier_block_count[0], outlier_local_count);
__syncthreads();
if(tx == 0) {
// Compare to threshold
if(outlier_block_count[0] < flowvector_count * convergence_threshold) {
int index = atomicAdd(g_out_id, 1);
model_candidate[index] = loop_count;
outliers_candidate[index] = outlier_block_count[0];
}
}
}
}
cudaError_t call_RANSAC_kernel_block(int blocks, int threads, float *model_param_local, flowvector *flowvectors,
int flowvector_count, int *random_numbers, int max_iter, int error_threshold, float convergence_threshold,
int *g_out_id, int *model_candidate, int *outliers_candidate, int *launch_gpu, int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
RANSAC_kernel_block<<<dimGrid, dimBlock, l_mem_size>>>(model_param_local, flowvectors,
flowvector_count, random_numbers, max_iter, error_threshold, convergence_threshold,
g_out_id, model_candidate, outliers_candidate, launch_gpu);
cudaError_t err = cudaGetLastError();
return err;
}
|
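// --- Editor's note (hedged usage sketch, not part of the original benchmark) ---
// RANSAC_kernel_block only uses its dynamic shared memory for the single int
// outlier_block_count, so sizeof(int) is a sufficient l_mem_size. The kernel
// also busy-waits on launch_gpu via atomicAdd_system, so that array must live
// in memory visible to both host and device (e.g. cudaMallocManaged or
// zero-copy pinned memory). A thin wrapper under those assumptions, with a
// hypothetical name:
static cudaError_t launch_ransac(int blocks, int threads, float *d_model_param,
    flowvector *d_flowvectors, int flowvector_count, int *d_random_numbers,
    int max_iter, int error_threshold, float convergence_threshold,
    int *d_g_out_id, int *d_model_candidate, int *d_outliers_candidate,
    int *launch_gpu /* managed or pinned, written by the CPU thread */) {
    const int l_mem_size = sizeof(int); // room for outlier_block_count
    return call_RANSAC_kernel_block(blocks, threads, d_model_param,
        d_flowvectors, flowvector_count, d_random_numbers, max_iter,
        error_threshold, convergence_threshold, d_g_out_id, d_model_candidate,
        d_outliers_candidate, launch_gpu, l_mem_size);
}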
28948bff24d2cb83c1f92c1eac2853f45c028a12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Demo code of Cuda programming lecture
*
* This programme is a simple implementation of vector addition in CUDA
*
*
*/
#include <sys/time.h>
#include <cstdlib>
#include <cstdio>
// Device code
__global__ void VecAdd(int* A, int* B, int* C)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[i] = A[i] + B[i];
}
// Host code
int main()
{
int *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
int N = 33554432;
size_t size = N * sizeof(int);
int threadsPerBlock = 1024;
int blocksPerGrid = N / threadsPerBlock;
//Time measurement
timeval kernel_start, kernel_end;
timeval global_start, global_end;
float kernel_elapsed_time, global_elapsed_time;
// Allocate host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
//Initialization
for (int i = 0; i < N; i++)
{
h_A[i] = i;
h_B[i] = i;
}
// Allocate device memory
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
//Start global timer
gettimeofday(&global_start, NULL);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
//Start kernel timer
gettimeofday(&kernel_start, NULL);
// Invoke kernel
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C);
 //Since kernel launch is asynchronous, block the host code until the kernel finishes
hipDeviceSynchronize();
//End kernel timer
gettimeofday(&kernel_end, NULL);
// Copy result from device memory to host memory
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
 //hipMemcpy is synchronous, no barrier is needed here
//Stop global timer
gettimeofday(&global_end, NULL);
//get kernel elapsed time
kernel_elapsed_time = 1000*(kernel_end.tv_sec - kernel_start.tv_sec) + (float)(kernel_end.tv_usec - kernel_start.tv_usec)/1000;
//get global elapsed time
global_elapsed_time = 1000*(global_end.tv_sec - global_start.tv_sec) + (float)(global_end.tv_usec - global_start.tv_usec)/1000;
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is excluded): %.2f ms\n", kernel_elapsed_time);
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is included): %.2f ms\n", global_elapsed_time);
//Free host memory
free(h_A);
free(h_B);
free(h_C);
//Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
| 28948bff24d2cb83c1f92c1eac2853f45c028a12.cu | /**
* Demo code of Cuda programming lecture
*
* This programme is a simple implementation of vector addition in CUDA
*
*
*/
#include <sys/time.h>
#include <cstdlib>
#include <cstdio>
// Device code
__global__ void VecAdd(int* A, int* B, int* C)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[i] = A[i] + B[i];
}
// Host code
int main()
{
int *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
int N = 33554432;
size_t size = N * sizeof(int);
int threadsPerBlock = 1024;
int blocksPerGrid = N / threadsPerBlock;
//Time measurement
timeval kernel_start, kernel_end;
timeval global_start, global_end;
float kernel_elapsed_time, global_elapsed_time;
// Allocate host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
//Initialization
for (int i = 0; i < N; i++)
{
h_A[i] = i;
h_B[i] = i;
}
// Allocate device memory
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
//Start global timer
gettimeofday(&global_start, NULL);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
//Start kernel timer
gettimeofday(&kernel_start, NULL);
// Invoke kernel
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C);
 //Since kernel launch is asynchronous, block the host code until the kernel finishes
cudaDeviceSynchronize();
//End kernel timer
gettimeofday(&kernel_end, NULL);
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
 //cudaMemcpy is synchronous, no barrier is needed here
//Stop global timer
gettimeofday(&global_end, NULL);
//get kernel elapsed time
kernel_elapsed_time = 1000*(kernel_end.tv_sec - kernel_start.tv_sec) + (float)(kernel_end.tv_usec - kernel_start.tv_usec)/1000;
//get global elapsed time
global_elapsed_time = 1000*(global_end.tv_sec - global_start.tv_sec) + (float)(global_end.tv_usec - global_start.tv_usec)/1000;
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is excluded): %.2f ms\n", kernel_elapsed_time);
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is included): %.2f ms\n", global_elapsed_time);
//Free host memory
free(h_A);
free(h_B);
free(h_C);
//Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
|
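// --- Editor's note (hedged generalization, not part of the original demo) ---
// The demo above relies on N (2^25) being an exact multiple of
// threadsPerBlock, so blocksPerGrid = N / threadsPerBlock covers every element
// with no leftover threads. For an arbitrary N the usual pattern is a
// rounded-up grid plus a bounds check in the kernel; VecAddAnyN is a
// hypothetical name.
__global__ void VecAddAnyN(const int* A, const int* B, int* C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) // guard the last, partially filled block
        C[i] = A[i] + B[i];
}
// launch: VecAddAnyN<<<(N + threadsPerBlock - 1) / threadsPerBlock,
//                      threadsPerBlock>>>(d_A, d_B, d_C, N);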
14cfad76b9f92fe66edb863c0543fae827464e73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Synapse Class C++
// Synapse.cpp
//
// Author: Nasir Ahmad
// Date: 7/12/2015
#include "Synapses.h"
#include "../Helpers/CUDAErrorCheckHelpers.h"
#include "../Helpers/TerminalHelpers.h"
#include "../Helpers/RandomStateManager.h"
#include <algorithm> // for random shuffle
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/count.h>
// Synapses Constructor
Synapses::Synapses() {
total_number_of_synapses = 0;
temp_number_of_synapses_in_last_group = 0;
largest_synapse_group_size = 0;
old_largest_number_of_blocks_x = 0;
neuron_indices_set_up_on_device = false;
original_synapse_indices = NULL;
// Full Matrices
presynaptic_neuron_indices = NULL;
postsynaptic_neuron_indices = NULL;
synaptic_efficacies_or_weights = NULL;
d_temp_presynaptic_neuron_indices = NULL;
d_temp_postsynaptic_neuron_indices = NULL;
d_temp_synaptic_efficacies_or_weights = NULL;
d_presynaptic_neuron_indices = NULL;
d_postsynaptic_neuron_indices = NULL;
d_synaptic_efficacies_or_weights = NULL;
d_states_for_random_number_generation = NULL;
print_synapse_group_details = false;
// On construction, seed
srand(42); // Seeding the random numbers
}
// Synapses Destructor
Synapses::~Synapses() {
free(presynaptic_neuron_indices);
free(postsynaptic_neuron_indices);
free(synaptic_efficacies_or_weights);
CudaSafeCall(hipFree(d_presynaptic_neuron_indices));
CudaSafeCall(hipFree(d_postsynaptic_neuron_indices));
CudaSafeCall(hipFree(d_synaptic_efficacies_or_weights));
}
void Synapses::AddGroup(int presynaptic_group_id,
int postsynaptic_group_id,
Neurons * neurons,
Neurons * input_neurons,
float timestep,
synapse_parameters_struct * synapse_params) {
if (print_synapse_group_details == true) {
printf("Adding synapse group...\n");
printf("presynaptic_group_id: %d\n", presynaptic_group_id);
printf("postsynaptic_group_id: %d\n", postsynaptic_group_id);
}
int* start_neuron_indices_for_neuron_groups = neurons->start_neuron_indices_for_each_group;
int* start_neuron_indices_for_input_neuron_groups = input_neurons->start_neuron_indices_for_each_group;
int* last_neuron_indices_for_neuron_groups = neurons->last_neuron_indices_for_each_group;
int* last_neuron_indices_for_input_neuron_groups = input_neurons->last_neuron_indices_for_each_group;
int * presynaptic_group_shape;
int * postsynaptic_group_shape;
int prestart = 0;
int preend = 0;
int poststart = 0;
// Calculate presynaptic group start and end indices
// Also assign presynaptic group shape
bool presynaptic_group_is_input = PRESYNAPTIC_IS_INPUT(presynaptic_group_id);
if (presynaptic_group_is_input) {
// if (stdp_on == true) print_message_and_exit("Plasticity between input neurons and model neurons is not currently supported.");
int corrected_presynaptic_group_id = CORRECTED_PRESYNAPTIC_ID(presynaptic_group_id, presynaptic_group_is_input);
presynaptic_group_shape = input_neurons->group_shapes[corrected_presynaptic_group_id];
if (presynaptic_group_id < -1){
prestart = start_neuron_indices_for_input_neuron_groups[corrected_presynaptic_group_id];
}
preend = last_neuron_indices_for_input_neuron_groups[corrected_presynaptic_group_id] + 1;
} else {
presynaptic_group_shape = neurons->group_shapes[presynaptic_group_id];
if (presynaptic_group_id > 0){
prestart = start_neuron_indices_for_neuron_groups[presynaptic_group_id];
}
preend = last_neuron_indices_for_neuron_groups[presynaptic_group_id] + 1;
}
// Calculate postsynaptic group start and end indices
// Also assign postsynaptic group shape
if (postsynaptic_group_id < 0) { // If presynaptic group is Input group EXIT
print_message_and_exit("Input groups cannot be a postsynaptic neuron group.");
} else if (postsynaptic_group_id >= 0){
postsynaptic_group_shape = neurons->group_shapes[postsynaptic_group_id];
poststart = start_neuron_indices_for_neuron_groups[postsynaptic_group_id];
}
int postend = last_neuron_indices_for_neuron_groups[postsynaptic_group_id] + 1;
if (print_synapse_group_details == true) {
const char * presynaptic_group_type_string = (presynaptic_group_id < 0) ? "input_neurons" : "neurons";
printf("Presynaptic neurons start index: %d (%s)\n", prestart, presynaptic_group_type_string);
printf("Presynaptic neurons end index: %d (%s)\n", preend, presynaptic_group_type_string);
printf("Postsynaptic neurons start index: %d (neurons)\n", poststart);
printf("Postsynaptic neurons end index: %d (neurons)\n", postend);
}
int original_number_of_synapses = total_number_of_synapses;
// Carry out the creation of the connectivity matrix
switch (synapse_params->connectivity_type){
case CONNECTIVITY_TYPE_ALL_TO_ALL:
{
int increment = (preend-prestart)*(postend-poststart);
this->increment_number_of_synapses(increment);
// If the connectivity is all_to_all
for (int i = prestart; i < preend; i++){
for (int j = poststart; j < postend; j++){
// Index
int idx = original_number_of_synapses + (i-prestart)*(postend-poststart) + (j-poststart);
// Setup Synapses
presynaptic_neuron_indices[idx] = CORRECTED_PRESYNAPTIC_ID(i, presynaptic_group_is_input);
postsynaptic_neuron_indices[idx] = j;
}
}
break;
}
case CONNECTIVITY_TYPE_ONE_TO_ONE:
{
int increment = (preend-prestart);
this->increment_number_of_synapses(increment);
// If the connectivity is one_to_one
if ((preend-prestart) != (postend-poststart)) print_message_and_exit("Unequal populations for one_to_one.");
// Create the connectivity
for (int i = 0; i < (preend-prestart); i++){
presynaptic_neuron_indices[original_number_of_synapses + i] = CORRECTED_PRESYNAPTIC_ID(prestart + i, presynaptic_group_is_input);
postsynaptic_neuron_indices[original_number_of_synapses + i] = poststart + i;
}
break;
}
case CONNECTIVITY_TYPE_RANDOM: //JI DO
{
// If the connectivity is random
// Begin a count
for (int i = prestart; i < preend; i++){
for (int j = poststart; j < postend; j++){
// Probability of connection
float prob = ((float)rand() / (RAND_MAX));
// If it is within the probability range, connect!
if (prob < synapse_params->random_connectivity_probability){
this->increment_number_of_synapses(1);
// Setup Synapses
presynaptic_neuron_indices[total_number_of_synapses - 1] = CORRECTED_PRESYNAPTIC_ID(i, presynaptic_group_is_input);
postsynaptic_neuron_indices[total_number_of_synapses - 1] = j;
}
}
}
break;
}
case CONNECTIVITY_TYPE_GAUSSIAN_SAMPLE:
{
float standard_deviation_sigma = synapse_params->gaussian_synapses_standard_deviation;
int number_of_new_synapses_per_postsynaptic_neuron = synapse_params->gaussian_synapses_per_postsynaptic_neuron;
int number_of_postsynaptic_neurons_in_group = postend - poststart;
int total_number_of_new_synapses = number_of_new_synapses_per_postsynaptic_neuron * number_of_postsynaptic_neurons_in_group;
this->increment_number_of_synapses(total_number_of_new_synapses);
if (total_number_of_new_synapses > largest_synapse_group_size) {
largest_synapse_group_size = total_number_of_new_synapses;
CudaSafeCall(hipMalloc((void **)&d_temp_presynaptic_neuron_indices, sizeof(int)*total_number_of_new_synapses));
CudaSafeCall(hipMalloc((void **)&d_temp_postsynaptic_neuron_indices, sizeof(int)*total_number_of_new_synapses));
}
hipLaunchKernelGGL(( set_neuron_indices_by_sampling_from_normal_distribution), dim3(RandomStateManager::instance()->block_dimensions), dim3(RandomStateManager::instance()->threads_per_block), 0, 0, total_number_of_new_synapses, postsynaptic_group_id, poststart, prestart, postsynaptic_group_shape[0], postsynaptic_group_shape[1], presynaptic_group_shape[0], presynaptic_group_shape[1], number_of_new_synapses_per_postsynaptic_neuron, number_of_postsynaptic_neurons_in_group, d_temp_presynaptic_neuron_indices, d_temp_postsynaptic_neuron_indices, d_temp_synaptic_efficacies_or_weights, standard_deviation_sigma, presynaptic_group_is_input, RandomStateManager::instance()->d_states);
CudaCheckError();
CudaSafeCall(hipMemcpy(&presynaptic_neuron_indices[original_number_of_synapses], d_temp_presynaptic_neuron_indices, sizeof(int)*total_number_of_new_synapses, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy(&postsynaptic_neuron_indices[original_number_of_synapses], d_temp_postsynaptic_neuron_indices, sizeof(int)*total_number_of_new_synapses, hipMemcpyDeviceToHost));
break;
}
case CONNECTIVITY_TYPE_SINGLE:
{
// If we desire a single connection
this->increment_number_of_synapses(1);
// Setup Synapses
presynaptic_neuron_indices[original_number_of_synapses] = CORRECTED_PRESYNAPTIC_ID(prestart + int(synapse_params->pairwise_connect_presynaptic), presynaptic_group_is_input);
postsynaptic_neuron_indices[original_number_of_synapses] = poststart + int(synapse_params->pairwise_connect_postsynaptic);
break;
}
default:
{
print_message_and_exit("Unknown Connection Type.");
break;
}
}
temp_number_of_synapses_in_last_group = total_number_of_synapses - original_number_of_synapses;
if (print_synapse_group_details == true) printf("%d new synapses added.\n\n", temp_number_of_synapses_in_last_group);
for (int i = original_number_of_synapses; i < total_number_of_synapses; i++){
float weight_range_bottom = synapse_params->weight_range_bottom;
float weight_range_top = synapse_params->weight_range_top;
if (weight_range_bottom == weight_range_top) {
synaptic_efficacies_or_weights[i] = weight_range_bottom;
} else {
float weight = weight_range_bottom + (weight_range_top - weight_range_bottom)*((float)rand() / (RAND_MAX));
synaptic_efficacies_or_weights[i] = weight;
}
original_synapse_indices[i] = i;
}
}
void Synapses::increment_number_of_synapses(int increment) {
total_number_of_synapses += increment;
presynaptic_neuron_indices = (int*)realloc(presynaptic_neuron_indices, total_number_of_synapses * sizeof(int));
postsynaptic_neuron_indices = (int*)realloc(postsynaptic_neuron_indices, total_number_of_synapses * sizeof(int));
synaptic_efficacies_or_weights = (float*)realloc(synaptic_efficacies_or_weights, total_number_of_synapses * sizeof(float));
original_synapse_indices = (int*)realloc(original_synapse_indices, total_number_of_synapses * sizeof(int));
}
void Synapses::allocate_device_pointers() {
printf("Allocating synapse device pointers...\n");
CudaSafeCall(hipMalloc((void **)&d_presynaptic_neuron_indices, sizeof(int)*total_number_of_synapses));
CudaSafeCall(hipMalloc((void **)&d_postsynaptic_neuron_indices, sizeof(int)*total_number_of_synapses));
CudaSafeCall(hipMalloc((void **)&d_synaptic_efficacies_or_weights, sizeof(float)*total_number_of_synapses));
}
void Synapses::copy_constants_and_initial_efficacies_to_device() {
printf("Copying synaptic constants and initial efficacies to device...\n");
CudaSafeCall(hipMemcpy(d_presynaptic_neuron_indices, presynaptic_neuron_indices, sizeof(int)*total_number_of_synapses, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_postsynaptic_neuron_indices, postsynaptic_neuron_indices, sizeof(int)*total_number_of_synapses, hipMemcpyHostToDevice));
CudaSafeCall(hipMemcpy(d_synaptic_efficacies_or_weights, synaptic_efficacies_or_weights, sizeof(float)*total_number_of_synapses, hipMemcpyHostToDevice));
}
// Provides an order of magnitude speedup for LIF (all-to-all at least).
// Because all synapses contribute to current_injection on every iteration, having all threads in a block access only 1 or 2 positions in memory causes a massive slowdown.
// Randomising order of synapses means that each block is accessing a larger number of points in memory.
void Synapses::shuffle_synapses() {
printf("Shuffling synapses...\n");
std::random_shuffle(&original_synapse_indices[0], &original_synapse_indices[total_number_of_synapses]);
int* new_presynaptic_neuron_indices = (int *)malloc(total_number_of_synapses*sizeof(int));
int* new_postsynaptic_neuron_indices = (int *)malloc(total_number_of_synapses*sizeof(int));
float* new_synaptic_efficacies_or_weights = (float *)malloc(total_number_of_synapses*sizeof(float));
for(int i = 0; i < total_number_of_synapses; i++) {
new_presynaptic_neuron_indices[i] = presynaptic_neuron_indices[original_synapse_indices[i]];
new_postsynaptic_neuron_indices[i] = postsynaptic_neuron_indices[original_synapse_indices[i]];
new_synaptic_efficacies_or_weights[i] = synaptic_efficacies_or_weights[original_synapse_indices[i]];
}
presynaptic_neuron_indices = new_presynaptic_neuron_indices;
postsynaptic_neuron_indices = new_postsynaptic_neuron_indices;
synaptic_efficacies_or_weights = new_synaptic_efficacies_or_weights;
}
void Synapses::set_threads_per_block_and_blocks_per_grid(int threads) {
threads_per_block.x = threads;
number_of_synapse_blocks_per_grid = dim3(1000);
}
__global__ void set_neuron_indices_by_sampling_from_normal_distribution(int total_number_of_new_synapses, int postsynaptic_group_id, int poststart, int prestart, int post_width, int post_height, int pre_width, int pre_height, int number_of_new_synapses_per_postsynaptic_neuron, int number_of_postsynaptic_neurons_in_group, int * d_presynaptic_neuron_indices, int * d_postsynaptic_neuron_indices, float * d_synaptic_efficacies_or_weights, float standard_deviation_sigma, bool presynaptic_group_is_input, hiprandState_t* d_states) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int t_idx = idx;
while (idx < total_number_of_new_synapses) {
int postsynaptic_neuron_id = idx / number_of_new_synapses_per_postsynaptic_neuron;
d_postsynaptic_neuron_indices[idx] = poststart + postsynaptic_neuron_id;
int postsynaptic_x = postsynaptic_neuron_id % post_width;
int postsynaptic_y = floor((float)(postsynaptic_neuron_id) / post_width);
float fractional_x = (float)postsynaptic_x / post_width;
float fractional_y = (float)postsynaptic_y / post_height;
int corresponding_presynaptic_centre_x = floor((float)pre_width * fractional_x);
int corresponding_presynaptic_centre_y = floor((float)pre_height * fractional_y);
bool presynaptic_x_set = false;
bool presynaptic_y_set = false;
int presynaptic_x = -1;
int presynaptic_y = -1;
while (true) {
if (presynaptic_x_set == false) {
float value_from_normal_distribution_for_x = hiprand_normal(&d_states[t_idx]);
float scaled_value_from_normal_distribution_for_x = standard_deviation_sigma * value_from_normal_distribution_for_x;
int rounded_scaled_value_from_normal_distribution_for_x = round(scaled_value_from_normal_distribution_for_x);
presynaptic_x = corresponding_presynaptic_centre_x + rounded_scaled_value_from_normal_distribution_for_x;
if ((presynaptic_x > -1) && (presynaptic_x < pre_width)) {
presynaptic_x_set = true;
}
}
if (presynaptic_y_set == false) {
float value_from_normal_distribution_for_y = hiprand_normal(&d_states[t_idx]);
float scaled_value_from_normal_distribution_for_y = standard_deviation_sigma * value_from_normal_distribution_for_y;
int rounded_scaled_value_from_normal_distribution_for_y = round(scaled_value_from_normal_distribution_for_y);
presynaptic_y = corresponding_presynaptic_centre_y + rounded_scaled_value_from_normal_distribution_for_y;
if ((presynaptic_y > -1) && (presynaptic_y < pre_height)) {
presynaptic_y_set = true;
}
}
if (presynaptic_x_set && presynaptic_y_set) {
d_presynaptic_neuron_indices[idx] = CORRECTED_PRESYNAPTIC_ID(prestart + presynaptic_x + presynaptic_y*pre_width, presynaptic_group_is_input);
break;
}
}
idx += blockDim.x * gridDim.x;
}
__syncthreads();
} | 14cfad76b9f92fe66edb863c0543fae827464e73.cu | // Synapse Class C++
// Synapse.cpp
//
// Author: Nasir Ahmad
// Date: 7/12/2015
#include "Synapses.h"
#include "../Helpers/CUDAErrorCheckHelpers.h"
#include "../Helpers/TerminalHelpers.h"
#include "../Helpers/RandomStateManager.h"
#include <algorithm> // for random shuffle
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/count.h>
// Synapses Constructor
Synapses::Synapses() {
total_number_of_synapses = 0;
temp_number_of_synapses_in_last_group = 0;
largest_synapse_group_size = 0;
old_largest_number_of_blocks_x = 0;
neuron_indices_set_up_on_device = false;
original_synapse_indices = NULL;
// Full Matrices
presynaptic_neuron_indices = NULL;
postsynaptic_neuron_indices = NULL;
synaptic_efficacies_or_weights = NULL;
d_temp_presynaptic_neuron_indices = NULL;
d_temp_postsynaptic_neuron_indices = NULL;
d_temp_synaptic_efficacies_or_weights = NULL;
d_presynaptic_neuron_indices = NULL;
d_postsynaptic_neuron_indices = NULL;
d_synaptic_efficacies_or_weights = NULL;
d_states_for_random_number_generation = NULL;
print_synapse_group_details = false;
// On construction, seed
srand(42); // Seeding the random numbers
}
// Synapses Destructor
Synapses::~Synapses() {
free(presynaptic_neuron_indices);
free(postsynaptic_neuron_indices);
free(synaptic_efficacies_or_weights);
CudaSafeCall(cudaFree(d_presynaptic_neuron_indices));
CudaSafeCall(cudaFree(d_postsynaptic_neuron_indices));
CudaSafeCall(cudaFree(d_synaptic_efficacies_or_weights));
}
void Synapses::AddGroup(int presynaptic_group_id,
int postsynaptic_group_id,
Neurons * neurons,
Neurons * input_neurons,
float timestep,
synapse_parameters_struct * synapse_params) {
if (print_synapse_group_details == true) {
printf("Adding synapse group...\n");
printf("presynaptic_group_id: %d\n", presynaptic_group_id);
printf("postsynaptic_group_id: %d\n", postsynaptic_group_id);
}
int* start_neuron_indices_for_neuron_groups = neurons->start_neuron_indices_for_each_group;
int* start_neuron_indices_for_input_neuron_groups = input_neurons->start_neuron_indices_for_each_group;
int* last_neuron_indices_for_neuron_groups = neurons->last_neuron_indices_for_each_group;
int* last_neuron_indices_for_input_neuron_groups = input_neurons->last_neuron_indices_for_each_group;
int * presynaptic_group_shape;
int * postsynaptic_group_shape;
int prestart = 0;
int preend = 0;
int poststart = 0;
// Calculate presynaptic group start and end indices
// Also assign presynaptic group shape
bool presynaptic_group_is_input = PRESYNAPTIC_IS_INPUT(presynaptic_group_id);
if (presynaptic_group_is_input) {
// if (stdp_on == true) print_message_and_exit("Plasticity between input neurons and model neurons is not currently supported.");
int corrected_presynaptic_group_id = CORRECTED_PRESYNAPTIC_ID(presynaptic_group_id, presynaptic_group_is_input);
presynaptic_group_shape = input_neurons->group_shapes[corrected_presynaptic_group_id];
if (presynaptic_group_id < -1){
prestart = start_neuron_indices_for_input_neuron_groups[corrected_presynaptic_group_id];
}
preend = last_neuron_indices_for_input_neuron_groups[corrected_presynaptic_group_id] + 1;
} else {
presynaptic_group_shape = neurons->group_shapes[presynaptic_group_id];
if (presynaptic_group_id > 0){
prestart = start_neuron_indices_for_neuron_groups[presynaptic_group_id];
}
preend = last_neuron_indices_for_neuron_groups[presynaptic_group_id] + 1;
}
// Calculate postsynaptic group start and end indices
// Also assign postsynaptic group shape
if (postsynaptic_group_id < 0) { // If presynaptic group is Input group EXIT
print_message_and_exit("Input groups cannot be a postsynaptic neuron group.");
} else if (postsynaptic_group_id >= 0){
postsynaptic_group_shape = neurons->group_shapes[postsynaptic_group_id];
poststart = start_neuron_indices_for_neuron_groups[postsynaptic_group_id];
}
int postend = last_neuron_indices_for_neuron_groups[postsynaptic_group_id] + 1;
if (print_synapse_group_details == true) {
const char * presynaptic_group_type_string = (presynaptic_group_id < 0) ? "input_neurons" : "neurons";
printf("Presynaptic neurons start index: %d (%s)\n", prestart, presynaptic_group_type_string);
printf("Presynaptic neurons end index: %d (%s)\n", preend, presynaptic_group_type_string);
printf("Postsynaptic neurons start index: %d (neurons)\n", poststart);
printf("Postsynaptic neurons end index: %d (neurons)\n", postend);
}
int original_number_of_synapses = total_number_of_synapses;
// Carry out the creation of the connectivity matrix
switch (synapse_params->connectivity_type){
case CONNECTIVITY_TYPE_ALL_TO_ALL:
{
int increment = (preend-prestart)*(postend-poststart);
this->increment_number_of_synapses(increment);
// If the connectivity is all_to_all
for (int i = prestart; i < preend; i++){
for (int j = poststart; j < postend; j++){
// Index
int idx = original_number_of_synapses + (i-prestart)*(postend-poststart) + (j-poststart);
// Setup Synapses
presynaptic_neuron_indices[idx] = CORRECTED_PRESYNAPTIC_ID(i, presynaptic_group_is_input);
postsynaptic_neuron_indices[idx] = j;
}
}
break;
}
case CONNECTIVITY_TYPE_ONE_TO_ONE:
{
int increment = (preend-prestart);
this->increment_number_of_synapses(increment);
// If the connectivity is one_to_one
if ((preend-prestart) != (postend-poststart)) print_message_and_exit("Unequal populations for one_to_one.");
// Create the connectivity
for (int i = 0; i < (preend-prestart); i++){
presynaptic_neuron_indices[original_number_of_synapses + i] = CORRECTED_PRESYNAPTIC_ID(prestart + i, presynaptic_group_is_input);
postsynaptic_neuron_indices[original_number_of_synapses + i] = poststart + i;
}
break;
}
case CONNECTIVITY_TYPE_RANDOM: //JI DO
{
// If the connectivity is random
// Begin a count
for (int i = prestart; i < preend; i++){
for (int j = poststart; j < postend; j++){
// Probability of connection
float prob = ((float)rand() / (RAND_MAX));
// If it is within the probability range, connect!
if (prob < synapse_params->random_connectivity_probability){
this->increment_number_of_synapses(1);
// Setup Synapses
presynaptic_neuron_indices[total_number_of_synapses - 1] = CORRECTED_PRESYNAPTIC_ID(i, presynaptic_group_is_input);
postsynaptic_neuron_indices[total_number_of_synapses - 1] = j;
}
}
}
break;
}
case CONNECTIVITY_TYPE_GAUSSIAN_SAMPLE:
{
float standard_deviation_sigma = synapse_params->gaussian_synapses_standard_deviation;
int number_of_new_synapses_per_postsynaptic_neuron = synapse_params->gaussian_synapses_per_postsynaptic_neuron;
int number_of_postsynaptic_neurons_in_group = postend - poststart;
int total_number_of_new_synapses = number_of_new_synapses_per_postsynaptic_neuron * number_of_postsynaptic_neurons_in_group;
this->increment_number_of_synapses(total_number_of_new_synapses);
if (total_number_of_new_synapses > largest_synapse_group_size) {
largest_synapse_group_size = total_number_of_new_synapses;
CudaSafeCall(cudaMalloc((void **)&d_temp_presynaptic_neuron_indices, sizeof(int)*total_number_of_new_synapses));
CudaSafeCall(cudaMalloc((void **)&d_temp_postsynaptic_neuron_indices, sizeof(int)*total_number_of_new_synapses));
}
set_neuron_indices_by_sampling_from_normal_distribution<<<RandomStateManager::instance()->block_dimensions, RandomStateManager::instance()->threads_per_block>>>(total_number_of_new_synapses, postsynaptic_group_id, poststart, prestart, postsynaptic_group_shape[0], postsynaptic_group_shape[1], presynaptic_group_shape[0], presynaptic_group_shape[1], number_of_new_synapses_per_postsynaptic_neuron, number_of_postsynaptic_neurons_in_group, d_temp_presynaptic_neuron_indices, d_temp_postsynaptic_neuron_indices, d_temp_synaptic_efficacies_or_weights, standard_deviation_sigma, presynaptic_group_is_input, RandomStateManager::instance()->d_states);
CudaCheckError();
CudaSafeCall(cudaMemcpy(&presynaptic_neuron_indices[original_number_of_synapses], d_temp_presynaptic_neuron_indices, sizeof(int)*total_number_of_new_synapses, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy(&postsynaptic_neuron_indices[original_number_of_synapses], d_temp_postsynaptic_neuron_indices, sizeof(int)*total_number_of_new_synapses, cudaMemcpyDeviceToHost));
break;
}
case CONNECTIVITY_TYPE_SINGLE:
{
// If we desire a single connection
this->increment_number_of_synapses(1);
// Setup Synapses
presynaptic_neuron_indices[original_number_of_synapses] = CORRECTED_PRESYNAPTIC_ID(prestart + int(synapse_params->pairwise_connect_presynaptic), presynaptic_group_is_input);
postsynaptic_neuron_indices[original_number_of_synapses] = poststart + int(synapse_params->pairwise_connect_postsynaptic);
break;
}
default:
{
print_message_and_exit("Unknown Connection Type.");
break;
}
}
temp_number_of_synapses_in_last_group = total_number_of_synapses - original_number_of_synapses;
if (print_synapse_group_details == true) printf("%d new synapses added.\n\n", temp_number_of_synapses_in_last_group);
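// Initialise the efficacies of the newly created synapses: a constant value when the
// weight range is degenerate, otherwise a uniform draw from [weight_range_bottom, weight_range_top].
// The original index of each synapse is also recorded so the order can be shuffled later.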
for (int i = original_number_of_synapses; i < total_number_of_synapses; i++){
float weight_range_bottom = synapse_params->weight_range_bottom;
float weight_range_top = synapse_params->weight_range_top;
if (weight_range_bottom == weight_range_top) {
synaptic_efficacies_or_weights[i] = weight_range_bottom;
} else {
float weight = weight_range_bottom + (weight_range_top - weight_range_bottom)*((float)rand() / (RAND_MAX));
synaptic_efficacies_or_weights[i] = weight;
}
original_synapse_indices[i] = i;
}
}
void Synapses::increment_number_of_synapses(int increment) {
total_number_of_synapses += increment;
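// Grow the host-side index and weight arrays so they can hold the newly added synapses.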
presynaptic_neuron_indices = (int*)realloc(presynaptic_neuron_indices, total_number_of_synapses * sizeof(int));
postsynaptic_neuron_indices = (int*)realloc(postsynaptic_neuron_indices, total_number_of_synapses * sizeof(int));
synaptic_efficacies_or_weights = (float*)realloc(synaptic_efficacies_or_weights, total_number_of_synapses * sizeof(float));
original_synapse_indices = (int*)realloc(original_synapse_indices, total_number_of_synapses * sizeof(int));
}
void Synapses::allocate_device_pointers() {
printf("Allocating synapse device pointers...\n");
CudaSafeCall(cudaMalloc((void **)&d_presynaptic_neuron_indices, sizeof(int)*total_number_of_synapses));
CudaSafeCall(cudaMalloc((void **)&d_postsynaptic_neuron_indices, sizeof(int)*total_number_of_synapses));
CudaSafeCall(cudaMalloc((void **)&d_synaptic_efficacies_or_weights, sizeof(float)*total_number_of_synapses));
}
void Synapses::copy_constants_and_initial_efficacies_to_device() {
printf("Copying synaptic constants and initial efficacies to device...\n");
CudaSafeCall(cudaMemcpy(d_presynaptic_neuron_indices, presynaptic_neuron_indices, sizeof(int)*total_number_of_synapses, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_postsynaptic_neuron_indices, postsynaptic_neuron_indices, sizeof(int)*total_number_of_synapses, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy(d_synaptic_efficacies_or_weights, synaptic_efficacies_or_weights, sizeof(float)*total_number_of_synapses, cudaMemcpyHostToDevice));
}
// Provides an order of magnitude speedup for LIF (all-to-all at least).
// Because all synapses contribute to current_injection on every iteration, having all threads in a block access only 1 or 2 positions in memory causes a massive slowdown.
// Randomising order of synapses means that each block is accessing a larger number of points in memory.
void Synapses::shuffle_synapses() {
printf("Shuffling synapses...\n");
std::random_shuffle(&original_synapse_indices[0], &original_synapse_indices[total_number_of_synapses]);
int* new_presynaptic_neuron_indices = (int *)malloc(total_number_of_synapses*sizeof(int));
int* new_postsynaptic_neuron_indices = (int *)malloc(total_number_of_synapses*sizeof(int));
float* new_synaptic_efficacies_or_weights = (float *)malloc(total_number_of_synapses*sizeof(float));
for(int i = 0; i < total_number_of_synapses; i++) {
new_presynaptic_neuron_indices[i] = presynaptic_neuron_indices[original_synapse_indices[i]];
new_postsynaptic_neuron_indices[i] = postsynaptic_neuron_indices[original_synapse_indices[i]];
new_synaptic_efficacies_or_weights[i] = synaptic_efficacies_or_weights[original_synapse_indices[i]];
}
presynaptic_neuron_indices = new_presynaptic_neuron_indices;
postsynaptic_neuron_indices = new_postsynaptic_neuron_indices;
synaptic_efficacies_or_weights = new_synaptic_efficacies_or_weights;
}
void Synapses::set_threads_per_block_and_blocks_per_grid(int threads) {
threads_per_block.x = threads;
number_of_synapse_blocks_per_grid = dim3(1000);
}
__global__ void set_neuron_indices_by_sampling_from_normal_distribution(int total_number_of_new_synapses, int postsynaptic_group_id, int poststart, int prestart, int post_width, int post_height, int pre_width, int pre_height, int number_of_new_synapses_per_postsynaptic_neuron, int number_of_postsynaptic_neurons_in_group, int * d_presynaptic_neuron_indices, int * d_postsynaptic_neuron_indices, float * d_synaptic_efficacies_or_weights, float standard_deviation_sigma, bool presynaptic_group_is_input, curandState_t* d_states) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int t_idx = idx;
while (idx < total_number_of_new_synapses) {
int postsynaptic_neuron_id = idx / number_of_new_synapses_per_postsynaptic_neuron;
d_postsynaptic_neuron_indices[idx] = poststart + postsynaptic_neuron_id;
int postsynaptic_x = postsynaptic_neuron_id % post_width;
int postsynaptic_y = floor((float)(postsynaptic_neuron_id) / post_width);
float fractional_x = (float)postsynaptic_x / post_width;
float fractional_y = (float)postsynaptic_y / post_height;
int corresponding_presynaptic_centre_x = floor((float)pre_width * fractional_x);
int corresponding_presynaptic_centre_y = floor((float)pre_height * fractional_y);
bool presynaptic_x_set = false;
bool presynaptic_y_set = false;
int presynaptic_x = -1;
int presynaptic_y = -1;
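// Rejection-sample presynaptic (x, y) coordinates: draw normally distributed offsets
// (scaled by sigma) around the corresponding presynaptic centre and retry each
// coordinate until it falls inside the presynaptic group's grid.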
while (true) {
if (presynaptic_x_set == false) {
float value_from_normal_distribution_for_x = curand_normal(&d_states[t_idx]);
float scaled_value_from_normal_distribution_for_x = standard_deviation_sigma * value_from_normal_distribution_for_x;
int rounded_scaled_value_from_normal_distribution_for_x = round(scaled_value_from_normal_distribution_for_x);
presynaptic_x = corresponding_presynaptic_centre_x + rounded_scaled_value_from_normal_distribution_for_x;
if ((presynaptic_x > -1) && (presynaptic_x < pre_width)) {
presynaptic_x_set = true;
}
}
if (presynaptic_y_set == false) {
float value_from_normal_distribution_for_y = curand_normal(&d_states[t_idx]);
float scaled_value_from_normal_distribution_for_y = standard_deviation_sigma * value_from_normal_distribution_for_y;
int rounded_scaled_value_from_normal_distribution_for_y = round(scaled_value_from_normal_distribution_for_y);
presynaptic_y = corresponding_presynaptic_centre_y + rounded_scaled_value_from_normal_distribution_for_y;
if ((presynaptic_y > -1) && (presynaptic_y < pre_height)) {
presynaptic_y_set = true;
}
}
if (presynaptic_x_set && presynaptic_y_set) {
d_presynaptic_neuron_indices[idx] = CORRECTED_PRESYNAPTIC_ID(prestart + presynaptic_x + presynaptic_y*pre_width, presynaptic_group_is_input);
break;
}
}
idx += blockDim.x * gridDim.x;
}
__syncthreads();
} |
c7d3ae147a49d9b1deac6f07af0b21c7bf006b2d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <sstream>
#include <string>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "check.hpp"
using namespace std::chrono;
__global__
void mat_mul(double *A,double *B,double *C,int *p_M,int *p_N,int *p_K)
{
//__syncthreads();
int M,N,K;
M=*p_M;
N=*p_N;
K=*p_K;
// get self idx
int i=blockIdx.x*blockDim.x+threadIdx.x;
int j=blockIdx.y*blockDim.y+threadIdx.y;
if (i>=M||j>=K) return;
// cal
C[i*K+j]=0.0;
for (int n=0;n<N;n++) C[i*K+j]+=A[i*N+n]*B[n*K+j];
}
int main(int argc, char *argv[])
{
double *A,*B,*C;
double *device_A,*device_B,*device_C;
// input settings
int blockNum;
std::cout<<"input block number: ";
std::cin>>blockNum;
std::cout<<std::endl;
int M,N,K;
std::cout<<"input M,N,K: ";
std::cin>>M>>N>>K;
std::cout<<std::endl;
// host's space
A=new double[M*N];
B=new double[N*K];
C=new double[M*K];
// initialize
srand(time(NULL));
int tmp=M*N;
for (int i=0;i<tmp;i++) A[i]=(double)(rand()%6400)/100;
tmp=N*K;
for (int i=0;i<tmp;i++) B[i]=(double)(rand()%6400)/100;
bzero(C,M*K*sizeof(double));
for (int i=0;i<M;i++)
{
for (int j=0;j<N;j++)
{
std::cout<<A[i*N+j]<<' ';
}
std::cout<<std::endl;
}
std::cout<<"=================================="<<std::endl;
for (int i=0;i<N;i++)
{
for (int j=0;j<K;j++)
{
std::cout<<B[i*K+j]<<' ';
}
std::cout<<std::endl;
}
std::cout<<"=================================="<<std::endl;
// device's space
hipMalloc((void **)&device_A,sizeof(double)*M*N);
hipMalloc((void **)&device_B,sizeof(double)*N*K);
hipMalloc((void **)&device_C,sizeof(double)*M*K);
int *device_M,*device_N,*device_K;
hipMalloc((void **)&device_M,sizeof(int));
hipMalloc((void **)&device_N,sizeof(int));
hipMalloc((void **)&device_K,sizeof(int));
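// The timed region below covers the host-to-device copies, the kernel launch and the
// device-to-host copy of C; that final copy also synchronizes with the kernel.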
auto start=system_clock::now();
// pass value
hipMemcpy(device_A,A,sizeof(double)*M*N,hipMemcpyHostToDevice);
hipMemcpy(device_B,B,sizeof(double)*N*K,hipMemcpyHostToDevice);
hipMemcpy(device_M,&M,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(device_N,&N,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(device_K,&K,sizeof(int),hipMemcpyHostToDevice);
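// Launch configuration: blockDim.x = blockNum threads cover the M rows of C in chunks
// along gridDim.x, while gridDim.y = K assigns one block (blockDim.y == 1) per output
// column, so each thread computes a single element C[i*K+j].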
dim3 grid(M/blockNum+1,K);
dim3 block(blockNum);
hipLaunchKernelGGL(( mat_mul), dim3(grid),dim3(block), 0, 0, device_A,device_B,device_C,device_M,device_N,device_K);
hipMemcpy(C,device_C,sizeof(double)*M*K,hipMemcpyDeviceToHost);
auto end=system_clock::now();
auto duration=duration_cast<microseconds>(end-start);
std::stringstream ss;
ss<<"echo '";
ss<<"M="<<M<<",N="<<N<<",K="<<K<<"\t";
ss<<"blocks="<<blockNum<<",time="<<std::fixed<<double(duration.count())<<"us";
ss<<"' >> CUDA_GEMM.log";
std::string cmd=ss.str();
std::cout<<cmd<<std::endl;
system(cmd.c_str());
for (int i=0;i<M;i++)
{
for (int j=0;j<K;j++)
{
std::cout<<C[i*K+j]<<' ';
}
std::cout<<std::endl;
}
checkMatrix(A,B,C,M,N,K);
delete[] A;
delete[] B;
delete[] C;
} | c7d3ae147a49d9b1deac6f07af0b21c7bf006b2d.cu | #include <iostream>
#include <sstream>
#include <string>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <chrono>
#include <cuda.h>
#include <cuda_runtime.h>
#include "check.hpp"
using namespace std::chrono;
__global__
void mat_mul(double *A,double *B,double *C,int *p_M,int *p_N,int *p_K)
{
//__syncthreads();
int M,N,K;
M=*p_M;
N=*p_N;
K=*p_K;
// get self idx
int i=blockIdx.x*blockDim.x+threadIdx.x;
int j=blockIdx.y*blockDim.y+threadIdx.y;
if (i>=M||j>=K) return;
// cal
C[i*K+j]=0.0;
for (int n=0;n<N;n++) C[i*K+j]+=A[i*N+n]*B[n*K+j];
}
int main(int argc, char *argv[])
{
double *A,*B,*C;
double *device_A,*device_B,*device_C;
// input settings
int blockNum;
std::cout<<"input block number: ";
std::cin>>blockNum;
std::cout<<std::endl;
int M,N,K;
std::cout<<"input M,N,K: ";
std::cin>>M>>N>>K;
std::cout<<std::endl;
// host's space
A=new double[M*N];
B=new double[N*K];
C=new double[M*K];
// initialize
srand(time(NULL));
int tmp=M*N;
for (int i=0;i<tmp;i++) A[i]=(double)(rand()%6400)/100;
tmp=N*K;
for (int i=0;i<tmp;i++) B[i]=(double)(rand()%6400)/100;
bzero(C,M*K*sizeof(double));
for (int i=0;i<M;i++)
{
for (int j=0;j<N;j++)
{
std::cout<<A[i*N+j]<<' ';
}
std::cout<<std::endl;
}
std::cout<<"=================================="<<std::endl;
for (int i=0;i<N;i++)
{
for (int j=0;j<K;j++)
{
std::cout<<B[i*K+j]<<' ';
}
std::cout<<std::endl;
}
std::cout<<"=================================="<<std::endl;
// device's space
cudaMalloc((void **)&device_A,sizeof(double)*M*N);
cudaMalloc((void **)&device_B,sizeof(double)*N*K);
cudaMalloc((void **)&device_C,sizeof(double)*M*K);
int *device_M,*device_N,*device_K;
cudaMalloc((void **)&device_M,sizeof(int));
cudaMalloc((void **)&device_N,sizeof(int));
cudaMalloc((void **)&device_K,sizeof(int));
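// The timed region below covers the host-to-device copies, the kernel launch and the
// device-to-host copy of C; that final copy also synchronizes with the kernel.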
auto start=system_clock::now();
// pass value
cudaMemcpy(device_A,A,sizeof(double)*M*N,cudaMemcpyHostToDevice);
cudaMemcpy(device_B,B,sizeof(double)*N*K,cudaMemcpyHostToDevice);
cudaMemcpy(device_M,&M,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(device_N,&N,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(device_K,&K,sizeof(int),cudaMemcpyHostToDevice);
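// Launch configuration: blockDim.x = blockNum threads cover the M rows of C in chunks
// along gridDim.x, while gridDim.y = K assigns one block (blockDim.y == 1) per output
// column, so each thread computes a single element C[i*K+j].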
dim3 grid(M/blockNum+1,K);
dim3 block(blockNum);
mat_mul<<<grid,block>>>(device_A,device_B,device_C,device_M,device_N,device_K);
cudaMemcpy(C,device_C,sizeof(double)*M*K,cudaMemcpyDeviceToHost);
auto end=system_clock::now();
auto duration=duration_cast<microseconds>(end-start);
std::stringstream ss;
ss<<"echo '";
ss<<"M="<<M<<",N="<<N<<",K="<<K<<"\t";
ss<<"blocks="<<blockNum<<",time="<<std::fixed<<double(duration.count())<<"us";
ss<<"' >> CUDA_GEMM.log";
std::string cmd=ss.str();
std::cout<<cmd<<std::endl;
system(cmd.c_str());
for (int i=0;i<M;i++)
{
for (int j=0;j<K;j++)
{
std::cout<<C[i*K+j]<<' ';
}
std::cout<<std::endl;
}
checkMatrix(A,B,C,M,N,K);
delete[] A;
delete[] B;
delete[] C;
} |
0de079d161c45780b5d4c330f8d5e37b33551417.hip | // !!! This is a file automatically generated by hipify!!!
# include <time.h>
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include <hip/hip_runtime.h>
# include <rocblas.h>
# include <cusolverDn.h>
# define BILLION 1000000000L
int main (int argc , char * argv [])
{
struct timespec start , stop ; // variables for timing
double accum ; // elapsed time variable
hipsolverDnHandle_t cusolverH ; // cusolver handle
hipblasHandle_t cublasH ; // cublas handle
hipblasStatus_t cublas_status = HIPBLAS_STATUS_SUCCESS ;
cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS ;
hipError_t cudaStat = hipSuccess ;
const int m = 2048; // number of rows of A
const int n = 2048; // number of columns of A
const int lda = m; // leading dimension of A
// declare the factorized matrix A, orthogonal matrices U, VT
float *A, *U, *VT , *S; // and sing .val. matrix S on the host
A=( float *) malloc (lda*n* sizeof ( float ));
U=( float *) malloc (lda*m* sizeof ( float ));
VT =( float *) malloc (lda *n* sizeof ( float ));
S= ( float *) malloc (n* sizeof ( float ));
for(int i=0;i<lda*n;i++) A[i]= rand ()/( float ) RAND_MAX ;
// the factorized matrix d_A , orthogonal matrices d_U , d_VT
float *d_A , *d_U , *d_VT , *d_S; // and sing .val. matrix d_S
int * devInfo ; // on the device
float *d_work , * d_rwork ; // workspace on the device
float *d_W; // auxiliary device array (d_W = d_S* d_VT )
int lwork = 0;
int info_gpu = 0; // info copied from device to host
const float h_one = 1;
const float h_minus_one = -1;
// create cusolver and cublas handle
cusolver_status = hipsolverDnCreate (& cusolverH );
cublas_status = hipblasCreate (& cublasH );
// prepare memory on the device
cudaStat = hipMalloc (( void **)& d_A , sizeof ( float )* lda*n);
cudaStat = hipMalloc (( void **)& d_S , sizeof ( float )*n);
cudaStat = hipMalloc (( void **)& d_U , sizeof ( float )* lda*m);
cudaStat = hipMalloc (( void **)& d_VT , sizeof ( float )* lda*n);
cudaStat = hipMalloc (( void **)& devInfo , sizeof (int ));
cudaStat = hipMalloc (( void **)& d_W , sizeof ( float )* lda*n);
cudaStat = hipMemcpy (d_A , A, sizeof ( float )* lda*n,
hipMemcpyHostToDevice ); // copy A- >d_A
// compute buffer size and prepare workspace
cusolver_status = hipsolverDnSgesvd_bufferSize ( cusolverH ,m,n,
& lwork );
cudaStat = hipMalloc (( void **)& d_work , sizeof ( float )* lwork );
// compute the singular value decomposition of d_A
// and optionally the left and right singular vectors :
// d_A = d_U *d_S * d_VT ; the diagonal elements of d_S
// are the singular values of d_A in descending order
// the first min (m,n) columns of d_U contain the left sing .vec .
// the first min (m,n) cols of d_VT contain the right sing .vec .
signed char jobu = 'A'; // all m columns of d_U returned
signed char jobvt = 'A'; // all n columns of d_VT returned
clock_gettime ( CLOCK_REALTIME ,& start ); // start timer
cusolver_status = hipsolverDnSgesvd (cusolverH, jobu, jobvt,
m, n, d_A, lda, d_S, d_U, lda, d_VT, lda, d_work, lwork,
d_rwork, devInfo);
cudaStat = hipDeviceSynchronize ();
clock_gettime ( CLOCK_REALTIME ,& stop ); // stop timer
accum =( stop .tv_sec - start . tv_sec )+ // elapsed time
( stop . tv_nsec - start . tv_nsec )/( double ) BILLION ;
printf ("SVD time : %lf sec .\n",accum ); // print elapsed time
cudaStat = hipMemcpy (U,d_U , sizeof ( float )* lda*m,
hipMemcpyDeviceToHost ); // copy d_U - >U
cudaStat = hipMemcpy (VT ,d_VT , sizeof ( float )* lda*n,
hipMemcpyDeviceToHost ); // copy d_VT - >VT
cudaStat = hipMemcpy (S,d_S , sizeof ( float )*n,
hipMemcpyDeviceToHost ); // copy d_S - >S
cudaStat = hipMemcpy (& info_gpu , devInfo , sizeof (int) ,
hipMemcpyDeviceToHost ); // devInfo - > info_gpu
printf (" after gesvd : info_gpu = %d\n", info_gpu );
// multiply d_VT by the diagonal matrix corresponding to d_S
cublas_status = hipblasSdgmm ( cublasH , HIPBLAS_SIDE_LEFT ,n,n,
d_VT , lda , d_S , 1 , d_W , lda ); // d_W =d_S * d_VT
cudaStat = hipMemcpy (d_A ,A, sizeof ( float )* lda*n,
hipMemcpyHostToDevice ); // copy A- >d_A
// compute the difference d_A -d_U *d_S * d_VT
cublas_status = hipblasSgemm ( cublasH , HIPBLAS_OP_N , HIPBLAS_OP_N ,
m, n, n, & h_minus_one ,d_U , lda , d_W , lda , &h_one , d_A , lda );
float dR_fro = 0.0; // variable for the norm
// compute the norm of the difference d_A -d_U *d_S * d_VT
cublas_status = hipblasSnrm2 ( cublasH ,lda*n,d_A ,1 ,& dR_fro );
printf ("|A - U*S*VT| = %E \n", dR_fro ); // print the norm
// free memory
hipFree (d_A );
hipFree (d_S );
hipFree (d_U );
hipFree ( d_VT );
hipFree ( devInfo );
hipFree ( d_work );
hipFree ( d_rwork );
hipFree (d_W );
hipblasDestroy ( cublasH );
hipsolverDnDestroy ( cusolverH );
hipDeviceReset ();
return 0;
}
| 0de079d161c45780b5d4c330f8d5e37b33551417.cu | # include <time.h>
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include <cuda_runtime.h>
# include <cublas_v2.h>
# include <cusolverDn.h>
# define BILLION 1000000000L
int main (int argc , char * argv [])
{
struct timespec start , stop ; // variables for timing
double accum ; // elapsed time variable
cusolverDnHandle_t cusolverH ; // cusolver handle
cublasHandle_t cublasH ; // cublas handle
cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS ;
cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS ;
cudaError_t cudaStat = cudaSuccess ;
const int m = 2048; // number of rows of A
const int n = 2048; // number of columns of A
const int lda = m; // leading dimension of A
// declare the factorized matrix A, orthogonal matrices U, VT
float *A, *U, *VT , *S; // and sing .val. matrix S on the host
A=( float *) malloc (lda*n* sizeof ( float ));
U=( float *) malloc (lda*m* sizeof ( float ));
VT =( float *) malloc (lda *n* sizeof ( float ));
S= ( float *) malloc (n* sizeof ( float ));
for(int i=0;i<lda*n;i++) A[i]= rand ()/( float ) RAND_MAX ;
// the factorized matrix d_A , orthogonal matrices d_U , d_VT
float *d_A , *d_U , *d_VT , *d_S; // and sing .val. matrix d_S
int * devInfo ; // on the device
float *d_work , * d_rwork ; // workspace on the device
float *d_W; // auxiliary device array (d_W = d_S* d_VT )
int lwork = 0;
int info_gpu = 0; // info copied from device to host
const float h_one = 1;
const float h_minus_one = -1;
// create cusolver and cublas handle
cusolver_status = cusolverDnCreate (& cusolverH );
cublas_status = cublasCreate (& cublasH );
// prepare memory on the device
cudaStat = cudaMalloc (( void **)& d_A , sizeof ( float )* lda*n);
cudaStat = cudaMalloc (( void **)& d_S , sizeof ( float )*n);
cudaStat = cudaMalloc (( void **)& d_U , sizeof ( float )* lda*m);
cudaStat = cudaMalloc (( void **)& d_VT , sizeof ( float )* lda*n);
cudaStat = cudaMalloc (( void **)& devInfo , sizeof (int ));
cudaStat = cudaMalloc (( void **)& d_W , sizeof ( float )* lda*n);
cudaStat = cudaMemcpy (d_A , A, sizeof ( float )* lda*n,
cudaMemcpyHostToDevice ); // copy A- >d_A
// compute buffer size and prepare workspace
cusolver_status = cusolverDnSgesvd_bufferSize ( cusolverH ,m,n,
& lwork );
cudaStat = cudaMalloc (( void **)& d_work , sizeof ( float )* lwork );
// compute the singular value decomposition of d_A
// and optionally the left and right singular vectors :
// d_A = d_U *d_S * d_VT ; the diagonal elements of d_S
// are the singular values of d_A in descending order
// the first min (m,n) columns of d_U contain the left sing .vec .
// the first min (m,n) cols of d_VT contain the right sing .vec .
signed char jobu = 'A'; // all m columns of d_U returned
signed char jobvt = 'A'; // all n columns of d_VT returned
clock_gettime ( CLOCK_REALTIME ,& start ); // start timer
cusolver_status = cusolverDnSgesvd (cusolverH, jobu, jobvt,
m, n, d_A, lda, d_S, d_U, lda, d_VT, lda, d_work, lwork,
d_rwork, devInfo);
cudaStat = cudaDeviceSynchronize ();
clock_gettime ( CLOCK_REALTIME ,& stop ); // stop timer
accum =( stop .tv_sec - start . tv_sec )+ // elapsed time
( stop . tv_nsec - start . tv_nsec )/( double ) BILLION ;
printf ("SVD time : %lf sec .\n",accum ); // print elapsed time
cudaStat = cudaMemcpy (U,d_U , sizeof ( float )* lda*m,
cudaMemcpyDeviceToHost ); // copy d_U - >U
cudaStat = cudaMemcpy (VT ,d_VT , sizeof ( float )* lda*n,
cudaMemcpyDeviceToHost ); // copy d_VT - >VT
cudaStat = cudaMemcpy (S,d_S , sizeof ( float )*n,
cudaMemcpyDeviceToHost ); // copy d_S - >S
cudaStat = cudaMemcpy (& info_gpu , devInfo , sizeof (int) ,
cudaMemcpyDeviceToHost ); // devInfo - > info_gpu
printf (" after gesvd : info_gpu = %d\n", info_gpu );
// multiply d_VT by the diagonal matrix corresponding to d_S
cublas_status = cublasSdgmm ( cublasH , CUBLAS_SIDE_LEFT ,n,n,
d_VT , lda , d_S , 1 , d_W , lda ); // d_W =d_S * d_VT
cudaStat = cudaMemcpy (d_A ,A, sizeof ( float )* lda*n,
cudaMemcpyHostToDevice ); // copy A- >d_A
// compute the difference d_A -d_U *d_S * d_VT
cublas_status = cublasSgemm_v2 ( cublasH , CUBLAS_OP_N , CUBLAS_OP_N ,
m, n, n, & h_minus_one ,d_U , lda , d_W , lda , &h_one , d_A , lda );
float dR_fro = 0.0; // variable for the norm
// compute the norm of the difference d_A -d_U *d_S * d_VT
cublas_status = cublasSnrm2_v2 ( cublasH ,lda*n,d_A ,1 ,& dR_fro );
printf ("|A - U*S*VT| = %E \n", dR_fro ); // print the norm
// free memory
cudaFree (d_A );
cudaFree (d_S );
cudaFree (d_U );
cudaFree ( d_VT );
cudaFree ( devInfo );
cudaFree ( d_work );
cudaFree ( d_rwork );
cudaFree (d_W );
cublasDestroy ( cublasH );
cusolverDnDestroy ( cusolverH );
cudaDeviceReset ();
return 0;
}
|
9609f4f6974ca4652a2657249287e20cb5fb4c5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Vector Addition: C = A + B.
* Device code.
*/
#ifndef _VECTORADD_KERNEL_H_
#define _VECTORADD_KERNEL_H_
#include <stdio.h>
#include "vectoradd.h"
// Vector addition kernel thread specification
__global__ void VectorAddKernel(Vector A, Vector B, Vector C)
{
//Add the two vectors
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < VSIZE) {
C.elements[i] = A.elements[i] + B.elements[i];
}
}
#endif // #ifndef _VECTORADD_KERNEL_H_
| 9609f4f6974ca4652a2657249287e20cb5fb4c5d.cu | /* Vector Addition: C = A + B.
* Device code.
*/
#ifndef _VECTORADD_KERNEL_H_
#define _VECTORADD_KERNEL_H_
#include <stdio.h>
#include "vectoradd.h"
// Vector addition kernel thread specification
__global__ void VectorAddKernel(Vector A, Vector B, Vector C)
{
//Add the two vectors
int i = threadIdx.x + blockDim.x * blockIdx.x;
if(i < VSIZE) {
C.elements[i] = A.elements[i] + B.elements[i];
}
}
#endif // #ifndef _VECTORADD_KERNEL_H_
|
1b42118c0001a8b9baefe35d9db11354f35c8297.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* blockAndThread.cu
* description: fill two arrays with blockId and threadId values
* notes: compile with nvcc and parent code:
* "nvcc blockAndThread.c blockAndThread.cu"
* Program is similar to one that appears in Dr. Dobbs Journal.
* The tutorial is available at:
* http://www.ddj.com/hpc-high-performance-computing/207200659
* Also used Andrew Bellenir's matrix multiplication program
**/
#include <stdio.h>
#include <stdlib.h>
/*
* In CUDA it is necessary to define block sizes
* The grid of data that will be worked on is divided into blocks
*/
#define BLOCK_SIZE 8
/**
* The function that will be executed in each stream processor
* The __global__ directive identifies this function as being
* an executable kernel on the CUDA device.
* All kernels must be declared with a return type void
*/
__global__ void cu_fillArray(int *block_d,int *thread_d){
int x;
/* blockIdx.x is a built-in variable in CUDA
that returns the blockId in the x axis.
threadIdx.x is another built-in variable in CUDA
that returns the threadId in the x axis
of the thread that is being executed by the
stream processor accessing this particular block
*/
x=blockIdx.x*BLOCK_SIZE+threadIdx.x;
block_d[x] = blockIdx.x;
thread_d[x] = threadIdx.x;
}
/**
* This function is called from the host computer.
* It calls the function that is executed on the GPU.
* Recall that:
* The host computer and the GPU have separate memories
* Hence it is necessary to
* - Allocate memory in the GPU
* - Copy the variables that will be operated on from host
* memory to the corresponding variables in the GPU
* - Describe the configuration of the grid and the block size
* - Call the kernel, the code that will be executed on the GPU
* - Once the kernel has finished executing, copy the results
* back from GPU memory to host memory
*/
extern "C" void fillArray(int *block,int *thread,int arraySize){
//block_d and thread_d are the GPU counterparts of the arrays that exist in the host memory
int *block_d;
int *thread_d;
hipError_t result;
//allocate memory on device
// hipMalloc allocates space in the memory of the GPU
result = hipMalloc((void**)&block_d,sizeof(int)*arraySize);
if (result != hipSuccess) {
printf("hipMalloc - block_d - failed\n");
exit(1);
}
result = hipMalloc((void**)&thread_d,sizeof(int)*arraySize);
if (result != hipSuccess) {
printf("hipMalloc - thread_d - failed\n");
exit(1);
}
//copy the arrays into the variable array_d in the device
result = hipMemcpy(block_d,block,sizeof(int)*arraySize,hipMemcpyHostToDevice);
if (result != hipSuccess) {
printf("hipMemcpy - host-GPU - block - failed\n");
exit(1);
}
result = hipMemcpy(thread_d,thread,sizeof(int)*arraySize,hipMemcpyHostToDevice);
if (result != hipSuccess) {
printf("hipMemcpy - host-GPU - thread - failed\n");
exit(1);
}
//execution configuration...
// Indicate the dimension of the block
dim3 dimblock(BLOCK_SIZE);
// Indicate the dimension of the grid in blocks
dim3 dimgrid(arraySize/BLOCK_SIZE);
//actual computation: Call the kernel
hipLaunchKernelGGL(( cu_fillArray), dim3(dimgrid),dim3(dimblock), 0, 0, block_d,thread_d);
// read results back:
// Copy the results from the memory in the GPU back to the memory on the host
result = hipMemcpy(block,block_d,sizeof(int)*arraySize,hipMemcpyDeviceToHost);
if (result != hipSuccess) {
printf("hipMemcpy - GPU-host - block - failed\n");
exit(1);
}
result = hipMemcpy(thread,thread_d,sizeof(int)*arraySize,hipMemcpyDeviceToHost);
if (result != hipSuccess) {
printf("hipMemcpy - GPU-host - thread - failed\n");
exit(1);
}
// Release the memory on the GPU
hipFree(block_d);
hipFree(thread_d);
}
| 1b42118c0001a8b9baefe35d9db11354f35c8297.cu | /**
* blockAndThread.cu
* description: fill two arrays with blockId and threadId values
* notes: compile with nvcc and parent code:
* "nvcc blockAndThread.c blockAndThread.cu"
* Program is similar to one that appears in Dr. Dobbs Journal.
* The tutorial is available at:
* http://www.ddj.com/hpc-high-performance-computing/207200659
* Also used Andrew Bellenir's matrix multiplication program
**/
#include <stdio.h>
#include <stdlib.h>
/*
* In CUDA it is necessary to define block sizes
* The grid of data that will be worked on is divided into blocks
*/
#define BLOCK_SIZE 8
/**
* The function that will be executed in each stream processor
* The __global__ directive identifies this function as being
* an executable kernel on the CUDA device.
* All kernels must be declared with a return type void
*/
__global__ void cu_fillArray(int *block_d,int *thread_d){
int x;
/* blockIdx.x is a built-in variable in CUDA
that returns the blockId in the x axis.
threadIdx.x is another built-in variable in CUDA
that returns the threadId in the x axis
of the thread that is being executed by the
stream processor accessing this particular block
*/
x=blockIdx.x*BLOCK_SIZE+threadIdx.x;
block_d[x] = blockIdx.x;
thread_d[x] = threadIdx.x;
}
/**
* This function is called from the host computer.
* It calls the function that is executed on the GPU.
* Recall that:
* The host computer and the GPU have separate memories
* Hence it is necessary to
* - Allocate memory in the GPU
* - Copy the variables that will be operated on from host
* memory to the corresponding variables in the GPU
* - Describe the configuration of the grid and the block size
* - Call the kernel, the code that will be executed on the GPU
* - Once the kernel has finished executing, copy the results
* back from GPU memory to host memory
*/
extern "C" void fillArray(int *block,int *thread,int arraySize){
//block_d and thread_d are the GPU counterparts of the arrays that exist in the host memory
int *block_d;
int *thread_d;
cudaError_t result;
//allocate memory on device
// cudaMalloc allocates space in the memory of the GPU
result = cudaMalloc((void**)&block_d,sizeof(int)*arraySize);
if (result != cudaSuccess) {
printf("cudaMalloc - block_d - failed\n");
exit(1);
}
result = cudaMalloc((void**)&thread_d,sizeof(int)*arraySize);
if (result != cudaSuccess) {
printf("cudaMalloc - thread_d - failed\n");
exit(1);
}
//copy the arrays into the variable array_d in the device
result = cudaMemcpy(block_d,block,sizeof(int)*arraySize,cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
printf("cudaMemcpy - host-GPU - block - failed\n");
exit(1);
}
result = cudaMemcpy(thread_d,thread,sizeof(int)*arraySize,cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
printf("cudaMemcpy - host-GPU - thread - failed\n");
exit(1);
}
//execution configuration...
// Indicate the dimension of the block
dim3 dimblock(BLOCK_SIZE);
// Indicate the dimension of the grid in blocks
dim3 dimgrid(arraySize/BLOCK_SIZE);
//actual computation: Call the kernel
cu_fillArray<<<dimgrid,dimblock>>>(block_d,thread_d);
// read results back:
// Copy the results from the memory in the GPU back to the memory on the host
result = cudaMemcpy(block,block_d,sizeof(int)*arraySize,cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
printf("cudaMemcpy - GPU-host - block - failed\n");
exit(1);
}
result = cudaMemcpy(thread,thread_d,sizeof(int)*arraySize,cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
printf("cudaMemcpy - GPU-host - thread - failed\n");
exit(1);
}
// Release the memory on the GPU
cudaFree(block_d);
cudaFree(thread_d);
}
|
a5eb9cd99a9cf6a9fd4e7c4fefbd2640b4cbea95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/cast.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! SparseSoftmaxCrossEntropy <Tx = ?, Ty = ?, Device = CUDA> */
template <typename Tx, typename Ty>
__global__ void _SparseSoftmaxCrossEntropy(
const int count,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const Tx* prob,
const Ty* labels,
const int* ignores,
Tx* losses,
int* flags) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int oix = idx / inner_dim;
const int iix = idx % inner_dim;
const int label = labels[oix * inner_dim + iix];
int k;
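// Labels found in the ignore list contribute zero loss and a zero flag; otherwise the
// loss is the negative log of the target-class probability (clamped at FLT_MIN) and
// the flag is set to one.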
for (k = 0; k < num_ignores; k++) {
if (label == ignores[k]) {
losses[idx] = flags[idx] = 0;
break;
}
}
if (k == num_ignores) {
losses[idx] = -log(
max(prob[(oix * axis_dim + label)
* inner_dim + iix], FLT_MIN)
);
flags[idx] = 1;
}
}
}
/*! SparseSoftmaxCrossEntropy <Tx = float32, Ty = float32, Device = CUDA> */
template <> void SparseSoftmaxCrossEntropy<float, float, CUDAContext>(
const int outer_dim,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const float* prob,
const float* labels,
const int* ignores,
float* losses,
int* flags,
CUDAContext* ctx) {
const auto num_preds = outer_dim * inner_dim;
_SparseSoftmaxCrossEntropy<float, float>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(num_preds, axis_dim, inner_dim, num_ignores,
prob, labels, ignores, losses, flags);
}
/*! SparseSoftmaxCrossEntropy <Tx = float32, Ty = int64, Device = CUDA> */
template <> void SparseSoftmaxCrossEntropy<float, int64_t, CUDAContext>(
const int outer_dim,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const float* prob,
const int64_t* labels,
const int* ignores,
float* losses,
int* flags,
CUDAContext* ctx) {
const auto num_preds = outer_dim * inner_dim;
_SparseSoftmaxCrossEntropy<float, int64_t>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(num_preds, axis_dim, inner_dim, num_ignores,
prob, labels, ignores, losses, flags);
}
/*! SparseSoftmaxCrossEntropyGrad <Tx = ?, Ty = ?, Device = CUDA> */
template <typename Tx, typename Ty>
__global__ void _SparseSoftmaxCrossEntropyGrad(
const int count,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const Tx* prob,
const Ty* labels,
const int* ignores,
Tx* dx,
int* flags) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int oix = idx / inner_dim;
const int iix = idx % inner_dim;
const int label = labels[oix * inner_dim + iix];
int k;
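// If the label is in the ignore list, the gradient for every class is zeroed and the
// flag cleared; otherwise 1 is subtracted at the target class and the flag is set.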
for (k = 0; k < num_ignores; k++)
if (label == ignores[k]) break;
if (k != num_ignores) {
for (int c = 0; c < axis_dim; c++)
dx[(oix * axis_dim + c) * inner_dim + iix] = 0;
flags[idx] = 0;
} else {
dx[(oix * axis_dim + label) * inner_dim + iix] -= 1;
flags[idx] = 1;
}
}
}
/*! SparseSoftmaxCrossEntropyGrad <Tx = float32, Ty = float32, Device = CUDA> */
template<> void SparseSoftmaxCrossEntropyGrad<float, float, CUDAContext>(
const int outer_dim,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const float* prob,
const float* labels,
const int* ignores,
float* dx,
int* flags,
CUDAContext* ctx) {
const auto num_preds = outer_dim * inner_dim;
_SparseSoftmaxCrossEntropyGrad<float, float>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(num_preds, axis_dim, inner_dim, num_ignores,
prob, labels, ignores, dx, flags);
}
/*! SparseSoftmaxCrossEntropyGrad <Tx = float32, Ty = int64, Device = CUDA> */
template<> void SparseSoftmaxCrossEntropyGrad<float, int64_t, CUDAContext>(
const int outer_dim,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const float* prob,
const int64_t* labels,
const int* ignores,
float* dx,
int* flags,
CUDAContext* ctx) {
const auto num_preds = outer_dim * inner_dim;
_SparseSoftmaxCrossEntropyGrad<float, int64_t>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(num_preds, axis_dim, inner_dim, num_ignores,
prob, labels, ignores, dx, flags);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | a5eb9cd99a9cf6a9fd4e7c4fefbd2640b4cbea95.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/cast.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! SparseSoftmaxCrossEntropy <Tx = ?, Ty = ?, Device = CUDA> */
template <typename Tx, typename Ty>
__global__ void _SparseSoftmaxCrossEntropy(
const int count,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const Tx* prob,
const Ty* labels,
const int* ignores,
Tx* losses,
int* flags) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int oix = idx / inner_dim;
const int iix = idx % inner_dim;
const int label = labels[oix * inner_dim + iix];
int k;
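// Labels found in the ignore list contribute zero loss and a zero flag; otherwise the
// loss is the negative log of the target-class probability (clamped at FLT_MIN) and
// the flag is set to one.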
for (k = 0; k < num_ignores; k++) {
if (label == ignores[k]) {
losses[idx] = flags[idx] = 0;
break;
}
}
if (k == num_ignores) {
losses[idx] = -log(
max(prob[(oix * axis_dim + label)
* inner_dim + iix], FLT_MIN)
);
flags[idx] = 1;
}
}
}
/*! SparseSoftmaxCrossEntropy <Tx = float32, Ty = float32, Device = CUDA> */
template <> void SparseSoftmaxCrossEntropy<float, float, CUDAContext>(
const int outer_dim,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const float* prob,
const float* labels,
const int* ignores,
float* losses,
int* flags,
CUDAContext* ctx) {
const auto num_preds = outer_dim * inner_dim;
_SparseSoftmaxCrossEntropy<float, float>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(num_preds, axis_dim, inner_dim, num_ignores,
prob, labels, ignores, losses, flags);
}
/*! SparseSoftmaxCrossEntropy <Tx = float32, Ty = int64, Device = CUDA> */
template <> void SparseSoftmaxCrossEntropy<float, int64_t, CUDAContext>(
const int outer_dim,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const float* prob,
const int64_t* labels,
const int* ignores,
float* losses,
int* flags,
CUDAContext* ctx) {
const auto num_preds = outer_dim * inner_dim;
_SparseSoftmaxCrossEntropy<float, int64_t>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(num_preds, axis_dim, inner_dim, num_ignores,
prob, labels, ignores, losses, flags);
}
/*! SparseSoftmaxCrossEntropyGrad <Tx = ?, Ty = ?, Device = CUDA> */
template <typename Tx, typename Ty>
__global__ void _SparseSoftmaxCrossEntropyGrad(
const int count,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const Tx* prob,
const Ty* labels,
const int* ignores,
Tx* dx,
int* flags) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int oix = idx / inner_dim;
const int iix = idx % inner_dim;
const int label = labels[oix * inner_dim + iix];
int k;
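// If the label is in the ignore list, the gradient for every class is zeroed and the
// flag cleared; otherwise 1 is subtracted at the target class and the flag is set.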
for (k = 0; k < num_ignores; k++)
if (label == ignores[k]) break;
if (k != num_ignores) {
for (int c = 0; c < axis_dim; c++)
dx[(oix * axis_dim + c) * inner_dim + iix] = 0;
flags[idx] = 0;
} else {
dx[(oix * axis_dim + label) * inner_dim + iix] -= 1;
flags[idx] = 1;
}
}
}
/*! SparseSoftmaxCrossEntropyGrad <Tx = float32, Ty = float32, Device = CUDA> */
template<> void SparseSoftmaxCrossEntropyGrad<float, float, CUDAContext>(
const int outer_dim,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const float* prob,
const float* labels,
const int* ignores,
float* dx,
int* flags,
CUDAContext* ctx) {
const auto num_preds = outer_dim * inner_dim;
_SparseSoftmaxCrossEntropyGrad<float, float>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(num_preds, axis_dim, inner_dim, num_ignores,
prob, labels, ignores, dx, flags);
}
/*! SparseSoftmaxCrossEntropyGrad <Tx = float32, Ty = int64, Device = CUDA> */
template<> void SparseSoftmaxCrossEntropyGrad<float, int64_t, CUDAContext>(
const int outer_dim,
const int axis_dim,
const int inner_dim,
const int num_ignores,
const float* prob,
const int64_t* labels,
const int* ignores,
float* dx,
int* flags,
CUDAContext* ctx) {
const auto num_preds = outer_dim * inner_dim;
_SparseSoftmaxCrossEntropyGrad<float, int64_t>
<< < CUDA_BLOCKS(num_preds), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(num_preds, axis_dim, inner_dim, num_ignores,
prob, labels, ignores, dx, flags);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA |
d3d0248a5a493f91c2234f7dc156fd3a93651fb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <limits.h>
#include <stdio.h>
#ifdef SHARED_LEAK_ACROSS_BLOCKS
__device__ volatile int flag = 0;
#endif
#ifdef TO_GLOBAL_PTR
__device__ int *p;
#endif
__global__ void simple_kernel() {
#ifdef TO_SHARED_PTR
__shared__ int *p;
#endif
#ifdef LOCAL_LEAK_WITHIN_WARP
int i;
if (threadIdx.x == 0)
p = &i;
if (threadIdx.x == 1)
i = *p;
#elif SHARED_LEAK_ACROSS_BLOCKS
__shared__ int i;
if (blockIdx.x == 0) {
p = &i;
__threadfence();
flag = 1;
}
if (blockIdx.x == 1) {
while (flag != 1);
__threadfence();
i = *p;
}
#endif
}
int main() {
#ifdef LOCAL_LEAK_WITHIN_WARP
hipLaunchKernelGGL(( simple_kernel), dim3(1),dim3(2), 0, 0, );
#elif SHARED_LEAK_ACROSS_BLOCKS
hipLaunchKernelGGL(( simple_kernel), dim3(2),dim3(1), 0, 0, );
#endif
hipDeviceReset();
return 0;
}
| d3d0248a5a493f91c2234f7dc156fd3a93651fb3.cu | #include <limits.h>
#include <stdio.h>
#ifdef SHARED_LEAK_ACROSS_BLOCKS
__device__ volatile int flag = 0;
#endif
#ifdef TO_GLOBAL_PTR
__device__ int *p;
#endif
__global__ void simple_kernel() {
#ifdef TO_SHARED_PTR
__shared__ int *p;
#endif
#ifdef LOCAL_LEAK_WITHIN_WARP
int i;
if (threadIdx.x == 0)
p = &i;
if (threadIdx.x == 1)
i = *p;
#elif SHARED_LEAK_ACROSS_BLOCKS
__shared__ int i;
if (blockIdx.x == 0) {
p = &i;
__threadfence();
flag = 1;
}
if (blockIdx.x == 1) {
while (flag != 1);
__threadfence();
i = *p;
}
#endif
}
int main() {
#ifdef LOCAL_LEAK_WITHIN_WARP
simple_kernel<<<1,2>>>();
#elif SHARED_LEAK_ACROSS_BLOCKS
simple_kernel<<<2,1>>>();
#endif
cudaDeviceReset();
return 0;
}
|
f98d419b6ed2f8184221cf3b8852227f013deb2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
// Compute C = A * B
__global__ void matrixMultiplyShared(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
//@@ You have to use shared memory for this MP
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = 0;
numCColumns = 0;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
| f98d419b6ed2f8184221cf3b8852227f013deb2c.cu | #include <wb.h>
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
// Compute C = A * B
__global__ void matrixMultiplyShared(float * A, float * B, float * C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
//@@ You have to use shared memory for this MP
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostA; // The A matrix
float * hostB; // The B matrix
float * hostC; // The output C matrix
float * deviceA;
float * deviceB;
float * deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = 0;
numCColumns = 0;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
cudaThreadSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
093fa2fbef0b0c072837e4304c6dd52987aaafd2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "attention_impl.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <typename T>
__global__ void ConcatTensorToTensor(const int tensor_add_sequence_length,
const T* tensor_in,
const T* tensor_add,
T* tensor_out) {
const int h = threadIdx.x;
const int n = threadIdx.y;
const int s = blockIdx.x;
const int b = blockIdx.y;
const int chunk_id = blockIdx.z;
const int all_sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int num_heads = blockDim.y;
const int H = blockDim.x;
// K: number of identical tensors
// tensor_in: K x BxNxS'xH
// tensor_add: K x BxNxSxH
// tensor_out: K x BxNx(S'+S)xH
const int tensor_in_sequence_length = all_sequence_length - tensor_add_sequence_length;
const int present_SH = all_sequence_length * H;
const int present_NSH = num_heads * present_SH;
int out_offset = b * present_NSH + n * present_SH + s * H + h + chunk_id * (present_NSH * batch_size);
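// Sequence positions below S' (= all_sequence_length - tensor_add_sequence_length) are
// copied from tensor_in; the remaining S positions are copied from tensor_add.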
if (s < tensor_in_sequence_length) {
const int past_SH = tensor_in_sequence_length * H;
const int past_NSH = num_heads * past_SH;
const int in_offset = b * past_NSH + n * past_SH + s * H + h + chunk_id * (past_NSH * batch_size);
tensor_out[out_offset] = tensor_in[in_offset];
} else if (s < all_sequence_length) {
const int SH = tensor_add_sequence_length * H;
const int NSH = num_heads * SH;
const int in_offset = b * NSH + n * SH + (s - tensor_in_sequence_length) * H + h + chunk_id * (NSH * batch_size);
tensor_out[out_offset] = tensor_add[in_offset];
}
}
template <typename T>
__global__ void ConcatTensorToTensorLarge(const int tensor_add_sequence_length,
const int H,
const T* tensor_in,
const T* tensor_add,
T* tensor_out) {
// Use when (H*)*num_heads > 1024
int h = threadIdx.x;
const int n = threadIdx.y;
const int s = blockIdx.x;
const int b = blockIdx.y;
const int chunk_id = blockIdx.z;
const int all_sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int num_heads = blockDim.y;
const int stride = blockDim.x;
// K: number of identical tensors
// tensor_in: K x BxNxS'xH
// tensor_add: K x BxNxSxH
// tensor_out: K x BxNx(S'+S)xH
const int tensor_in_sequence_length = all_sequence_length - tensor_add_sequence_length;
const int present_SH = all_sequence_length * H;
const int present_NSH = num_heads * present_SH;
while (h < H) {
int out_offset = b * present_NSH + n * present_SH + s * H + h + chunk_id * (present_NSH * batch_size);
if (s < tensor_in_sequence_length) {
const int past_SH = tensor_in_sequence_length * H;
const int past_NSH = num_heads * past_SH;
const int in_offset = b * past_NSH + n * past_SH + s * H + h + chunk_id * (past_NSH * batch_size);
tensor_out[out_offset] = tensor_in[in_offset];
} else if (s < all_sequence_length) {
const int SH = tensor_add_sequence_length * H;
const int NSH = num_heads * SH;
const int in_offset = b * NSH + n * SH + (s - tensor_in_sequence_length) * H + h + chunk_id * (NSH * batch_size);
tensor_out[out_offset] = tensor_add[in_offset];
}
h += stride;
}
}
bool LaunchConcatTensorToTensor(hipStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const int max_threads_per_block,
const int matrix_num,
const float* tensor_in,
const float* tensor_add,
float* tensor_out) {
const dim3 grid(all_sequence_length, batch_size, matrix_num);
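// Dispatch: when the head size is even, reinterpret pairs of floats as float2 to halve H;
// if H * num_heads exceeds the per-block thread limit, fall back to the "Large" kernel,
// which strides over the head dimension instead of using one thread per element.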
if (0 == (head_size & 1)) {
const int H = head_size / 2;
if (H * num_heads <= max_threads_per_block) {
const dim3 block(H, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensor<float2>), dim3(grid), dim3(block), 0, stream, sequence_length, reinterpret_cast<const float2*>(tensor_in), reinterpret_cast<const float2*>(tensor_add), reinterpret_cast<float2*>(tensor_out));
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensorLarge<float2>), dim3(grid), dim3(block), 0, stream, sequence_length, H, reinterpret_cast<const float2*>(tensor_in), reinterpret_cast<const float2*>(tensor_add), reinterpret_cast<float2*>(tensor_out));
}
} else {
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensor<float>), dim3(grid), dim3(block), 0, stream, sequence_length, tensor_in, tensor_add, tensor_out);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensorLarge<float>), dim3(grid), dim3(block), 0, stream, sequence_length, head_size, tensor_in, tensor_add, tensor_out);
}
}
return CUDA_CALL(hipPeekAtLastError());
}
bool LaunchConcatTensorToTensor(hipStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const int max_threads_per_block,
const int matrix_num,
const half* tensor_in,
const half* tensor_add,
half* tensor_out) {
const dim3 grid(all_sequence_length, batch_size, matrix_num);
if (0 == (head_size % 4)) {
const int H = head_size / 4;
if (H * num_heads <= max_threads_per_block) {
const dim3 block(H, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensor<float2>), dim3(grid), dim3(block), 0, stream, sequence_length, reinterpret_cast<const float2*>(tensor_in), reinterpret_cast<const float2*>(tensor_add), reinterpret_cast<float2*>(tensor_out));
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensorLarge<float2>), dim3(grid), dim3(block), 0, stream, sequence_length, H, reinterpret_cast<const float2*>(tensor_in), reinterpret_cast<const float2*>(tensor_add), reinterpret_cast<float2*>(tensor_out));
}
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
if (H * num_heads <= max_threads_per_block) {
const dim3 block(H, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensor<half2>), dim3(grid), dim3(block), 0, stream, sequence_length, reinterpret_cast<const half2*>(tensor_in), reinterpret_cast<const half2*>(tensor_add), reinterpret_cast<half2*>(tensor_out));
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensorLarge<half2>), dim3(grid), dim3(block), 0, stream, sequence_length, H, reinterpret_cast<const half2*>(tensor_in), reinterpret_cast<const half2*>(tensor_add), reinterpret_cast<half2*>(tensor_out));
}
} else { // this should be an "odd" case. probably not worth catching it in the half2 kernel.
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensor<half>), dim3(grid), dim3(block), 0, stream, sequence_length, tensor_in, tensor_add, tensor_out);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
hipLaunchKernelGGL(( ConcatTensorToTensorLarge<half>), dim3(grid), dim3(block), 0, stream, sequence_length, head_size, tensor_in, tensor_add, tensor_out);
}
}
return CUDA_CALL(hipPeekAtLastError());
}
bool LaunchConcatPastToPresent(hipStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const int max_threads_per_block,
const float* past,
const float* k_v,
float* present) {
return LaunchConcatTensorToTensor(
stream,
all_sequence_length,
sequence_length,
batch_size,
head_size,
num_heads,
max_threads_per_block,
2,
past,
k_v,
present);
}
bool LaunchConcatPastToPresent(hipStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const int max_threads_per_block,
const half* past,
const half* k_v,
half* present) {
return LaunchConcatTensorToTensor(
stream,
all_sequence_length,
sequence_length,
batch_size,
head_size,
num_heads,
max_threads_per_block,
2,
past,
k_v,
present);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 093fa2fbef0b0c072837e4304c6dd52987aaafd2.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "attention_impl.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <typename T>
__global__ void ConcatTensorToTensor(const int tensor_add_sequence_length,
const T* tensor_in,
const T* tensor_add,
T* tensor_out) {
const int h = threadIdx.x;
const int n = threadIdx.y;
const int s = blockIdx.x;
const int b = blockIdx.y;
const int chunk_id = blockIdx.z;
const int all_sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int num_heads = blockDim.y;
const int H = blockDim.x;
// K: number of identical tensors
// tensor_in: K x BxNxS'xH
// tensor_add: K x BxNxSxH
// tensor_out: K x BxNx(S'+S)xH
const int tensor_in_sequence_length = all_sequence_length - tensor_add_sequence_length;
const int present_SH = all_sequence_length * H;
const int present_NSH = num_heads * present_SH;
int out_offset = b * present_NSH + n * present_SH + s * H + h + chunk_id * (present_NSH * batch_size);
if (s < tensor_in_sequence_length) {
const int past_SH = tensor_in_sequence_length * H;
const int past_NSH = num_heads * past_SH;
const int in_offset = b * past_NSH + n * past_SH + s * H + h + chunk_id * (past_NSH * batch_size);
tensor_out[out_offset] = tensor_in[in_offset];
} else if (s < all_sequence_length) {
const int SH = tensor_add_sequence_length * H;
const int NSH = num_heads * SH;
const int in_offset = b * NSH + n * SH + (s - tensor_in_sequence_length) * H + h + chunk_id * (NSH * batch_size);
tensor_out[out_offset] = tensor_add[in_offset];
}
}
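// A small worked example of the indexing above (illustrative numbers only):
// with batch_size B = 1, num_heads N = 2, H = 4, past length S' = 3 and new
// length S = 1, all_sequence_length = 4, present_SH = 16 and present_NSH = 32.
// For (b = 0, n = 1, s = 3, h = 0, chunk_id = 0):
// out_offset = 0*32 + 1*16 + 3*4 + 0 = 28.
// Since s >= tensor_in_sequence_length (= 3), the value comes from tensor_add,
// where SH = 1*4 = 4, NSH = 2*4 = 8 and in_offset = 0*8 + 1*4 + (3-3)*4 + 0 = 4.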
template <typename T>
__global__ void ConcatTensorToTensorLarge(const int tensor_add_sequence_length,
const int H,
const T* tensor_in,
const T* tensor_add,
T* tensor_out) {
// Use when (H*)*num_heads > 1024
int h = threadIdx.x;
const int n = threadIdx.y;
const int s = blockIdx.x;
const int b = blockIdx.y;
const int chunk_id = blockIdx.z;
const int all_sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int num_heads = blockDim.y;
const int stride = blockDim.x;
// K: number of identical tensor
// tensor_in: K x BxNxS'xH
// tensor_add: K x BxNxSxH
// tensor_out: K x BxNx(S'+S)xH
const int tensor_in_sequence_length = all_sequence_length - tensor_add_sequence_length;
const int present_SH = all_sequence_length * H;
const int present_NSH = num_heads * present_SH;
while (h < H) {
int out_offset = b * present_NSH + n * present_SH + s * H + h + chunk_id * (present_NSH * batch_size);
if (s < tensor_in_sequence_length) {
const int past_SH = tensor_in_sequence_length * H;
const int past_NSH = num_heads * past_SH;
const int in_offset = b * past_NSH + n * past_SH + s * H + h + chunk_id * (past_NSH * batch_size);
tensor_out[out_offset] = tensor_in[in_offset];
} else if (s < all_sequence_length) {
const int SH = tensor_add_sequence_length * H;
const int NSH = num_heads * SH;
const int in_offset = b * NSH + n * SH + (s - tensor_in_sequence_length) * H + h + chunk_id * (NSH * batch_size);
tensor_out[out_offset] = tensor_add[in_offset];
}
h += stride;
}
}
bool LaunchConcatTensorToTensor(cudaStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const int max_threads_per_block,
const int matrix_num,
const float* tensor_in,
const float* tensor_add,
float* tensor_out) {
const dim3 grid(all_sequence_length, batch_size, matrix_num);
if (0 == (head_size & 1)) {
const int H = head_size / 2;
if (H * num_heads <= max_threads_per_block) {
const dim3 block(H, num_heads, 1);
ConcatTensorToTensor<float2><<<grid, block, 0, stream>>>(sequence_length, reinterpret_cast<const float2*>(tensor_in), reinterpret_cast<const float2*>(tensor_add), reinterpret_cast<float2*>(tensor_out));
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
ConcatTensorToTensorLarge<float2><<<grid, block, 0, stream>>>(sequence_length, H, reinterpret_cast<const float2*>(tensor_in), reinterpret_cast<const float2*>(tensor_add), reinterpret_cast<float2*>(tensor_out));
}
} else {
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
ConcatTensorToTensor<float><<<grid, block, 0, stream>>>(sequence_length, tensor_in, tensor_add, tensor_out);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
ConcatTensorToTensorLarge<float><<<grid, block, 0, stream>>>(sequence_length, head_size, tensor_in, tensor_add, tensor_out);
}
}
return CUDA_CALL(cudaPeekAtLastError());
}
bool LaunchConcatTensorToTensor(cudaStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const int max_threads_per_block,
const int matrix_num,
const half* tensor_in,
const half* tensor_add,
half* tensor_out) {
const dim3 grid(all_sequence_length, batch_size, matrix_num);
if (0 == (head_size % 4)) {
const int H = head_size / 4;
if (H * num_heads <= max_threads_per_block) {
const dim3 block(H, num_heads, 1);
ConcatTensorToTensor<float2><<<grid, block, 0, stream>>>(sequence_length, reinterpret_cast<const float2*>(tensor_in), reinterpret_cast<const float2*>(tensor_add), reinterpret_cast<float2*>(tensor_out));
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
ConcatTensorToTensorLarge<float2><<<grid, block, 0, stream>>>(sequence_length, H, reinterpret_cast<const float2*>(tensor_in), reinterpret_cast<const float2*>(tensor_add), reinterpret_cast<float2*>(tensor_out));
}
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
if (H * num_heads <= max_threads_per_block) {
const dim3 block(H, num_heads, 1);
ConcatTensorToTensor<half2><<<grid, block, 0, stream>>>(sequence_length, reinterpret_cast<const half2*>(tensor_in), reinterpret_cast<const half2*>(tensor_add), reinterpret_cast<half2*>(tensor_out));
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
ConcatTensorToTensorLarge<half2><<<grid, block, 0, stream>>>(sequence_length, H, reinterpret_cast<const half2*>(tensor_in), reinterpret_cast<const half2*>(tensor_add), reinterpret_cast<half2*>(tensor_out));
}
} else { // this should be an "odd" case. probably not worth catching it in the half2 kernel.
if (head_size * num_heads <= max_threads_per_block) {
const dim3 block(head_size, num_heads, 1);
ConcatTensorToTensor<half><<<grid, block, 0, stream>>>(sequence_length, tensor_in, tensor_add, tensor_out);
} else {
const dim3 block(max_threads_per_block / num_heads, num_heads, 1);
ConcatTensorToTensorLarge<half><<<grid, block, 0, stream>>>(sequence_length, head_size, tensor_in, tensor_add, tensor_out);
}
}
return CUDA_CALL(cudaPeekAtLastError());
}
bool LaunchConcatPastToPresent(cudaStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const int max_threads_per_block,
const float* past,
const float* k_v,
float* present) {
return LaunchConcatTensorToTensor(
stream,
all_sequence_length,
sequence_length,
batch_size,
head_size,
num_heads,
max_threads_per_block,
2,
past,
k_v,
present);
}
bool LaunchConcatPastToPresent(cudaStream_t stream,
const int all_sequence_length,
const int sequence_length,
const int batch_size,
const int head_size,
const int num_heads,
const int max_threads_per_block,
const half* past,
const half* k_v,
half* present) {
return LaunchConcatTensorToTensor(
stream,
all_sequence_length,
sequence_length,
batch_size,
head_size,
num_heads,
max_threads_per_block,
2,
past,
k_v,
present);
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
7b42dc23a782f73df49620718194f9f74fb57e12.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_kgauss64sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int xrows = 1;
int xcols = 1;
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double *xx = NULL;
hipMalloc(&xx, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((_kgauss64sum), dim3(gridBlock), dim3(threadBlock), 0, 0, xrows, xcols, x, xx);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((_kgauss64sum), dim3(gridBlock), dim3(threadBlock), 0, 0, xrows, xcols, x, xx);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((_kgauss64sum), dim3(gridBlock), dim3(threadBlock), 0, 0, xrows, xcols, x, xx);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7b42dc23a782f73df49620718194f9f74fb57e12.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_kgauss64sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int xrows = 1;
int xcols = 1;
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
double *xx = NULL;
cudaMalloc(&xx, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_kgauss64sum<<<gridBlock,threadBlock>>>(xrows,xcols,x,xx);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_kgauss64sum<<<gridBlock,threadBlock>>>(xrows,xcols,x,xx);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_kgauss64sum<<<gridBlock,threadBlock>>>(xrows,xcols,x,xx);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6e0ab19bd6d946fb59bce678057215b92de0b524.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"FlockSim.h"
#include<cuda.h>
#include<stdlib.h>
#include<stdio.h>
#include<iostream>
#include<iomanip>
#include<time.h>
//#define NDEBUG
using namespace std;
FlockSim::FlockSim(int size,float wall_size)
{
wallx = wall_size;
wally = wall_size;
initialFlock(size);
hipMalloc((void**)&dev_flock,F.size*sizeof(Agent));
hipMemcpy(dev_flock, F.flock, F.size*sizeof(Agent),hipMemcpyHostToDevice);
}
__device__ float check_angle (float ang)
{
if (ang >= (float)360)
return ang - (float) 360;
if (ang < 0)
return ang + (float) 360;
else
return ang;
}
__global__ void update_flock_gpu (Agent* F, float wallx,float wally,int size,float dt)
{
int num = threadIdx.x + blockDim.x * blockIdx.x;
if (num < size)
{
F[num].x += cos(F[num].angle)*F[num].v*dt;
F[num].y += sin(F[num].angle)*F[num].v*dt;
if (F[num].x >= wallx/2 || F[num].x <= -wallx/2){
if (F[num].x >= wallx/2){F[num].x=wallx/2;}
else if(F[num].x<=-wallx/2){F[num].x=-wallx/2;}
F[num].angle = ((float)180.0 - F[num].angle);
}
if (F[num].y >= wally/2 || F[num].y <= -wally/2) {
if (F[num].y >= wally/2){F[num].y=wally/2;}
else if (F[num].y <= -wally/2){F[num].y=-wally/2;}
F[num].angle = (-(float)1.0* F[num].angle);
}
F[num].angle = check_angle(F[num].angle);
}
}
void FlockSim::update_flock(float dt)
{
dim3 Grid(cusp.Grid_Dim_x, cusp.Grid_Dim_y); //Grid structure
dim3 Block(cusp.Block_Dim_x,cusp.Block_Dim_y); //Block structure, threads/block limited by specific device
hipLaunchKernelGGL(( update_flock_gpu), dim3(Grid),dim3(Block), 0, 0, dev_flock,wallx,wally,F.size,dt);
}
void FlockSim::initialFlock(int size)
{
/*
space 0~wallx 0~wally 0~wallz
*/
F.size = size;
F.flock =(Agent*) malloc(size*sizeof(Agent));
srand((unsigned)time(0));
for (int i = 0; i < F.size; ++i)
{
F.flock[i].angle = (float)rand()/(float)RAND_MAX*360.0;
F.flock[i].x = (float)(rand()%(int)wallx/2);
F.flock[i].y = (float)(rand()%(int)wally/2);
F.flock[i].v = (float)rand()/(float)RAND_MAX; // 0~1
}
}
void FlockSim::printFlock()
{
hipMemcpy(F.flock,dev_flock,F.size*sizeof(Agent),hipMemcpyDeviceToHost);
cout <<setw(8)<< "n"\
<<setw(8) << "ang"\
<<setw(8) << "x"\
<<setw(8) << "y"\
<<setw(8) << "v"\
<< endl;
for (int i = 0; i < F.size; ++i)
{
cout << setw(8) << i\
<<setw(8) <<(int) F.flock[i].angle \
<<setw(8) << setprecision(2)<< F.flock[i].x \
<<setw(8) << F.flock[i].y \
<<setw(8) << F.flock[i].v \
<<endl;
}
cout << endl;
}
void FlockSim::copy2host()
{
hipMemcpy(F.flock,dev_flock,F.size*sizeof(Agent),hipMemcpyDeviceToHost);
}
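/*
A minimal usage sketch (hedged: the agent count, box size, time step and loop
length below are made up, and cusp's grid/block dimensions are assumed to be
configured elsewhere, e.g. in FlockSim.h):
FlockSim sim(1024, 100.0f); // 1024 agents in a 100 x 100 box
for (int step = 0; step < 100; ++step)
sim.update_flock(0.01f); // advance every agent one time step on the GPU
sim.printFlock(); // copies the flock back to the host and prints it
*/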
| 6e0ab19bd6d946fb59bce678057215b92de0b524.cu | #include"FlockSim.h"
#include<cuda.h>
#include<stdlib.h>
#include<stdio.h>
#include<iostream>
#include<iomanip>
#include<time.h>
//#define NDEBUG
using namespace std;
FlockSim::FlockSim(int size,float wall_size)
{
wallx = wall_size;
wally = wall_size;
initialFlock(size);
cudaMalloc((void**)&dev_flock,F.size*sizeof(Agent));
cudaMemcpy(dev_flock, F.flock, F.size*sizeof(Agent),cudaMemcpyHostToDevice);
}
__device__ float check_angle (float ang)
{
if (ang >= (float)360)
return ang - (float) 360;
if (ang < 0)
return ang + (float) 360;
else
return ang;
}
__global__ void update_flock_gpu (Agent* F, float wallx,float wally,int size,float dt)
{
int num = threadIdx.x + blockDim.x * blockIdx.x;
if (num < size)
{
F[num].x += cos(F[num].angle)*F[num].v*dt;
F[num].y += sin(F[num].angle)*F[num].v*dt;
if (F[num].x >= wallx/2 || F[num].x <= -wallx/2){
if (F[num].x >= wallx/2){F[num].x=wallx/2;}
else if(F[num].x<=-wallx/2){F[num].x=-wallx/2;}
F[num].angle = ((float)180.0 - F[num].angle);
}
if (F[num].y >= wally/2 || F[num].y <= -wally/2) {
if (F[num].y >= wally/2){F[num].y=wally/2;}
else if (F[num].y <= -wally/2){F[num].y=-wally/2;}
F[num].angle = (-(float)1.0* F[num].angle);
}
F[num].angle = check_angle(F[num].angle);
}
}
void FlockSim::update_flock(float dt)
{
dim3 Grid(cusp.Grid_Dim_x, cusp.Grid_Dim_y); //Grid structure
dim3 Block(cusp.Block_Dim_x,cusp.Block_Dim_y); //Block structure, threads/block limited by specific device
update_flock_gpu<<<Grid,Block>>>(dev_flock,wallx,wally,F.size,dt);
}
void FlockSim::initialFlock(int size)
{
/*
space 0~wallx 0~wally 0~wallz
*/
F.size = size;
F.flock =(Agent*) malloc(size*sizeof(Agent));
srand((unsigned)time(0));
for (int i = 0; i < F.size; ++i)
{
F.flock[i].angle = (float)rand()/(float)RAND_MAX*360.0;
F.flock[i].x = (float)(rand()%(int)wallx/2);
F.flock[i].y = (float)(rand()%(int)wally/2);
F.flock[i].v = (float)rand()/(float)RAND_MAX; // 0~1
}
}
void FlockSim::printFlock()
{
cudaMemcpy(F.flock,dev_flock,F.size*sizeof(Agent),cudaMemcpyDeviceToHost);
cout <<setw(8)<< "n"\
<<setw(8) << "ang"\
<<setw(8) << "x"\
<<setw(8) << "y"\
<<setw(8) << "v"\
<< endl;
for (int i = 0; i < F.size; ++i)
{
cout << setw(8) << i\
<<setw(8) <<(int) F.flock[i].angle \
<<setw(8) << setprecision(2)<< F.flock[i].x \
<<setw(8) << F.flock[i].y \
<<setw(8) << F.flock[i].v \
<<endl;
}
cout << endl;
}
void FlockSim::copy2host()
{
cudaMemcpy(F.flock,dev_flock,F.size*sizeof(Agent),cudaMemcpyDeviceToHost);
}
|
3012690f6ae6bbf8bb5425889107f3fd4312ebcd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdint.h>
#include "async_blur.h"
__global__ void kernGaussianBlur(int width, int height, uint8_t * dst, uint8_t * src) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x >= width || y >= height) {
return;
}
float kernel[5][5] = {
{0.003765, 0.015019, 0.023792, 0.015019, 0.003765},
{0.015019, 0.059912, 0.094907, 0.059912, 0.015019},
{0.023792, 0.094907, 0.150342, 0.094907, 0.023792},
{0.015019, 0.059912, 0.094907, 0.059912, 0.015019},
{0.003765, 0.015019, 0.023792, 0.015019, 0.003765}
};
float r, g, b;
r = g = b = 0.0;
for (int i = 0; i < 5; i++) {
int tx = x + i - 2;
for (int j = 0; j < 5; j++) {
int ty = y + j - 2;
if (tx >= 0 && ty >= 0 && tx < width && ty < height) {
r += src[(ty * width + tx) * 3] * kernel[i][j];
g += src[(ty * width + tx) * 3 + 1] * kernel[i][j];
b += src[(ty * width + tx) * 3 + 2] * kernel[i][j];
}
}
}
int idx = 3 * (y * width + x);
dst[idx] = r;
dst[idx + 1] = g;
dst[idx + 2] = b;
return;
}
AsyncBlur::AsyncBlur(int width, int height) {
this->width = width;
this->height = height;
int sz = sizeof(uint8_t) * width * height * 3;
this->tmp_dst = (uint8_t *) malloc(sz);
for (int i = 0; i < 3; i++) {
hipMalloc(&this->dev_src[i], sz);
hipMalloc(&this->dev_dst[i], sz);
}
hipStreamCreate(&this->uploadStream);
hipStreamCreate(&this->computeStream);
hipStreamCreate(&this->downloadStream);
this->cur = 0;
}
AsyncBlur::~AsyncBlur() {
for (int i = 0; i < 3; i++) {
hipFree(this->dev_src[i]);
hipFree(this->dev_dst[i]);
}
free(this->tmp_dst);
}
AVPixelFormat AsyncBlur::getPixelFormat() {
return AV_PIX_FMT_RGB24;
}
int AsyncBlur::processFrame(uint8_t * frame) {
int sz = sizeof(uint8_t) * width * height * 3;
// Stage 1: asynchronously upload the incoming frame into the next source slot.
hipMemcpyAsync(this->dev_src[this->cur % 3], frame, sz, hipMemcpyHostToDevice, this->uploadStream);
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(this->width + blockSize2d.x - 1) / blockSize2d.x,
(this->height + blockSize2d.y - 1) / blockSize2d.y);
// Stage 2: blur a previously uploaded source buffer on the compute stream.
if (this->cur >= 1) {
hipLaunchKernelGGL(( kernGaussianBlur), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, this->computeStream,
this->width, this->height, this->dev_dst[(this->cur - 1) % 3], this->dev_src[(this->cur + 1) % 3]);
}
// Stage 3: download an already blurred frame back to the host on the download stream.
if (this->cur >= 2) {
hipMemcpyAsync(this->tmp_dst, this->dev_dst[(this->cur - 2) % 3], sz, hipMemcpyDeviceToHost, this->downloadStream);
}
hipDeviceSynchronize();
this->cur++;
// TODO: can we avoid this memcpy?
memcpy(frame, this->tmp_dst, sz);
// The pipeline needs a few calls to fill; a nonzero return means frame does not yet hold a blurred result.
return (this->cur >= 3) ? 0 : 1;
}
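/*
A hedged usage sketch (the decode loop and the helpers decode_next_frame /
write_frame are hypothetical, not part of this file): one RGB24 frame is passed
in per call, and the return value says whether the buffer now holds a blurred
frame from earlier in the stream (0) or the pipeline is still filling (nonzero).
AsyncBlur blur(width, height);
while (decode_next_frame(rgb24)) {
if (blur.processFrame(rgb24) == 0)
write_frame(rgb24); // rgb24 now holds a blurred frame
}
*/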
| 3012690f6ae6bbf8bb5425889107f3fd4312ebcd.cu | #include <cuda.h>
#include <stdint.h>
#include "async_blur.h"
__global__ void kernGaussianBlur(int width, int height, uint8_t * dst, uint8_t * src) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x >= width || y >= height) {
return;
}
float kernel[5][5] = {
{0.003765, 0.015019, 0.023792, 0.015019, 0.003765},
{0.015019, 0.059912, 0.094907, 0.059912, 0.015019},
{0.023792, 0.094907, 0.150342, 0.094907, 0.023792},
{0.015019, 0.059912, 0.094907, 0.059912, 0.015019},
{0.003765, 0.015019, 0.023792, 0.015019, 0.003765}
};
float r, g, b;
r = g = b = 0.0;
for (int i = 0; i < 5; i++) {
int tx = x + i - 2;
for (int j = 0; j < 5; j++) {
int ty = y + j - 2;
if (tx >= 0 && ty >= 0 && tx < width && ty < height) {
r += src[(ty * width + tx) * 3] * kernel[i][j];
g += src[(ty * width + tx) * 3 + 1] * kernel[i][j];
b += src[(ty * width + tx) * 3 + 2] * kernel[i][j];
}
}
}
int idx = 3 * (y * width + x);
dst[idx] = r;
dst[idx + 1] = g;
dst[idx + 2] = b;
return;
}
AsyncBlur::AsyncBlur(int width, int height) {
this->width = width;
this->height = height;
int sz = sizeof(uint8_t) * width * height * 3;
this->tmp_dst = (uint8_t *) malloc(sz);
for (int i = 0; i < 3; i++) {
cudaMalloc(&this->dev_src[i], sz);
cudaMalloc(&this->dev_dst[i], sz);
}
cudaStreamCreate(&this->uploadStream);
cudaStreamCreate(&this->computeStream);
cudaStreamCreate(&this->downloadStream);
this->cur = 0;
}
AsyncBlur::~AsyncBlur() {
for (int i = 0; i < 3; i++) {
cudaFree(this->dev_src[i]);
cudaFree(this->dev_dst[i]);
}
free(this->tmp_dst);
}
AVPixelFormat AsyncBlur::getPixelFormat() {
return AV_PIX_FMT_RGB24;
}
int AsyncBlur::processFrame(uint8_t * frame) {
int sz = sizeof(uint8_t) * width * height * 3;
// Stage 1: asynchronously upload the incoming frame into the next source slot.
cudaMemcpyAsync(this->dev_src[this->cur % 3], frame, sz, cudaMemcpyHostToDevice, this->uploadStream);
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(this->width + blockSize2d.x - 1) / blockSize2d.x,
(this->height + blockSize2d.y - 1) / blockSize2d.y);
// Stage 2: blur a previously uploaded source buffer on the compute stream.
if (this->cur >= 1) {
kernGaussianBlur<<<blocksPerGrid2d, blockSize2d, 0, this->computeStream>>>(
this->width, this->height, this->dev_dst[(this->cur - 1) % 3], this->dev_src[(this->cur + 1) % 3]);
}
// Stage 3: download an already blurred frame back to the host on the download stream.
if (this->cur >= 2) {
cudaMemcpyAsync(this->tmp_dst, this->dev_dst[(this->cur - 2) % 3], sz, cudaMemcpyDeviceToHost, this->downloadStream);
}
cudaDeviceSynchronize();
this->cur++;
// TODO: can we avoid this memcpy?
memcpy(frame, this->tmp_dst, sz);
// The pipeline needs a few calls to fill; a nonzero return means frame does not yet hold a blurred result.
return (this->cur >= 3) ? 0 : 1;
}
|
373b65deb211d4382b46b9eaf1ac5906a16aac27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmgeelltmv.cu, normal z -> c, Mon Jun 25 18:24:25 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
template<bool betazero>
__global__ void
cmgeelltmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
extern __shared__ magmaFloatComplex dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ) {
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_rows * n + row ];
magmaFloatComplex val = dval [ num_rows * n + row ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
for( int i=0; i<num_vecs; i++ ) {
if (betazero) {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha;
} else {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i*num_cols ];
}
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELL
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( cmgeelltmv_kernel<true>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
} else {
hipLaunchKernelGGL(( cmgeelltmv_kernel<false>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
}
return MAGMA_SUCCESS;
}
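/*
A minimal usage sketch (hedged: allocation and host-to-device transfer of the
ELL arrays and vectors are assumed to have been done already with the usual
MAGMA helpers, and the variable names are placeholders):
// multiply num_vecs right-hand sides stored back to back with stride num_cols,
// matching the dx[col + i*num_cols] / dy[row + i*num_cols] indexing above
magma_cmgeelltmv( MagmaNoTrans, m, n, num_vecs, nnz_per_row,
alpha, dval, dcolind, dx, beta, dy, queue );
*/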
| 373b65deb211d4382b46b9eaf1ac5906a16aac27.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmgeelltmv.cu, normal z -> c, Mon Jun 25 18:24:25 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
template<bool betazero>
__global__ void
cmgeelltmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
extern __shared__ magmaFloatComplex dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ) {
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_rows * n + row ];
magmaFloatComplex val = dval [ num_rows * n + row ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
for( int i=0; i<num_vecs; i++ ) {
if (betazero) {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha;
} else {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i*num_cols ];
}
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELL
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cmgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaFloatComplex ); // num_vecs vectors
if (beta == MAGMA_C_ZERO) {
cmgeelltmv_kernel<true><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>>
( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
} else {
cmgeelltmv_kernel<false><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>>
( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
}
return MAGMA_SUCCESS;
}
|
fd7ae47f3daeb2456e0cc38f97c481b8a6dd2106.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <functional>
#include <random>
#include "XLib.hpp"
using namespace xlib;
enum ThreadReduceOP { INCLUSIVE, INCLUSIVE_ILP, EXCLUSIVE };
__global__ void threadReduceTest(int* DataIN, int* DataOUT) {
int Local_data[32];
for (int i = 0; i < 32; i++)
Local_data[i] = DataIN[i];
ThreadReduce::Add(Local_data);
DataOUT[SUM_OP] = Local_data[0];
for (int i = 0; i < 32; i++)
Local_data[i] = DataIN[i];
ThreadReduce::Min(Local_data);
DataOUT[MIN_OP] = Local_data[0];
for (int i = 0; i < 32; i++)
Local_data[i] = DataIN[i];
ThreadReduce::Max(Local_data);
DataOUT[MAX_OP] = Local_data[0];
for (int i = 0; i < 32; i++)
Local_data[i] = DataIN[i];
ThreadReduce::LogicAnd(Local_data);
DataOUT[LOGIC_AND_OP] = Local_data[0];
}
int main() {
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator (seed);
std::uniform_int_distribution<int> distribution(-50, 50);
const int INPUT_SIZE = 32;
const int N_OF_OPERATIONS = 4;
int DataIN[INPUT_SIZE];
int DataOUT[N_OF_OPERATIONS];
int DataOUT_copy[N_OF_OPERATIONS];
int* devDataIN, *devDataOUT;
__SAFE_CALL( hipMalloc(&devDataIN, sizeof(DataIN)) );
__SAFE_CALL( hipMalloc(&devDataOUT, sizeof(DataOUT)) );
for (int i = 0; i < INPUT_SIZE; i++)
DataIN[i] = distribution(generator);
xlib::printArray(DataIN, 32);
__SAFE_CALL( hipMemcpy(devDataIN, DataIN, sizeof(DataIN),
hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( threadReduceTest), dim3(1), dim3(1), 0, 0, devDataIN, devDataOUT);
__SAFE_CALL( hipMemcpy(DataOUT_copy, devDataOUT, sizeof(DataOUT),
hipMemcpyDeviceToHost) );
DataOUT[SUM_OP] = std::accumulate(DataIN, DataIN + INPUT_SIZE, 0);
if (DataOUT[SUM_OP] != DataOUT_copy[SUM_OP]) {
__ERROR("ThreadReduce (SUM) : " << DataOUT[SUM_OP] << "\t"
<< DataOUT_copy[SUM_OP]);
}
DataOUT[MIN_OP] = *std::min_element(DataIN, DataIN + INPUT_SIZE);
if (DataOUT[MIN_OP] != DataOUT_copy[MIN_OP]) {
__ERROR("ThreadReduce (MIN) : " << DataOUT[MIN_OP] << "\t"
<< DataOUT_copy[MIN_OP]);
}
DataOUT[MAX_OP] = *std::max_element(DataIN, DataIN + INPUT_SIZE);
if (DataOUT[MAX_OP] != DataOUT_copy[MAX_OP]) {
__ERROR("ThreadReduce (MAX) : " << DataOUT[MAX_OP] << "\t"
<< DataOUT_copy[MAX_OP]);
}
DataOUT[LOGIC_AND_OP] = DataIN[0];
for (int i = 1; i < INPUT_SIZE; i++)
DataOUT[LOGIC_AND_OP] = DataOUT[LOGIC_AND_OP] && DataIN[i];
if (DataOUT[LOGIC_AND_OP] != DataOUT_copy[LOGIC_AND_OP]) {
__ERROR("ThreadReduce (AND) : " << DataOUT[LOGIC_AND_OP] << "\t"
<< DataOUT_copy[LOGIC_AND_OP]);
}
}
| fd7ae47f3daeb2456e0cc38f97c481b8a6dd2106.cu | #include <iostream>
#include <chrono>
#include <functional>
#include <random>
#include "XLib.hpp"
using namespace xlib;
enum ThreadReduceOP { INCLUSIVE, INCLUSIVE_ILP, EXCLUSIVE };
__global__ void threadReduceTest(int* DataIN, int* DataOUT) {
int Local_data[32];
for (int i = 0; i < 32; i++)
Local_data[i] = DataIN[i];
ThreadReduce::Add(Local_data);
DataOUT[SUM_OP] = Local_data[0];
for (int i = 0; i < 32; i++)
Local_data[i] = DataIN[i];
ThreadReduce::Min(Local_data);
DataOUT[MIN_OP] = Local_data[0];
for (int i = 0; i < 32; i++)
Local_data[i] = DataIN[i];
ThreadReduce::Max(Local_data);
DataOUT[MAX_OP] = Local_data[0];
for (int i = 0; i < 32; i++)
Local_data[i] = DataIN[i];
ThreadReduce::LogicAnd(Local_data);
DataOUT[LOGIC_AND_OP] = Local_data[0];
}
int main() {
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator (seed);
std::uniform_int_distribution<int> distribution(-50, 50);
const int INPUT_SIZE = 32;
const int N_OF_OPERATIONS = 4;
int DataIN[INPUT_SIZE];
int DataOUT[N_OF_OPERATIONS];
int DataOUT_copy[N_OF_OPERATIONS];
int* devDataIN, *devDataOUT;
__SAFE_CALL( cudaMalloc(&devDataIN, sizeof(DataIN)) );
__SAFE_CALL( cudaMalloc(&devDataOUT, sizeof(DataOUT)) );
for (int i = 0; i < INPUT_SIZE; i++)
DataIN[i] = distribution(generator);
xlib::printArray(DataIN, 32);
__SAFE_CALL( cudaMemcpy(devDataIN, DataIN, sizeof(DataIN),
cudaMemcpyHostToDevice) );
threadReduceTest<<<1, 1>>>(devDataIN, devDataOUT);
__SAFE_CALL( cudaMemcpy(DataOUT_copy, devDataOUT, sizeof(DataOUT),
cudaMemcpyDeviceToHost) );
DataOUT[SUM_OP] = std::accumulate(DataIN, DataIN + INPUT_SIZE, 0);
if (DataOUT[SUM_OP] != DataOUT_copy[SUM_OP]) {
__ERROR("ThreadReduce (SUM) : " << DataOUT[SUM_OP] << "\t"
<< DataOUT_copy[SUM_OP]);
}
DataOUT[MIN_OP] = *std::min_element(DataIN, DataIN + INPUT_SIZE);
if (DataOUT[MIN_OP] != DataOUT_copy[MIN_OP]) {
__ERROR("ThreadReduce (MIN) : " << DataOUT[MIN_OP] << "\t"
<< DataOUT_copy[MIN_OP]);
}
DataOUT[MAX_OP] = *std::max_element(DataIN, DataIN + INPUT_SIZE);
if (DataOUT[MAX_OP] != DataOUT_copy[MAX_OP]) {
__ERROR("ThreadReduce (MAX) : " << DataOUT[MAX_OP] << "\t"
<< DataOUT_copy[MAX_OP]);
}
DataOUT[LOGIC_AND_OP] = DataIN[0];
for (int i = 1; i < INPUT_SIZE; i++)
DataOUT[LOGIC_AND_OP] = DataOUT[LOGIC_AND_OP] && DataIN[i];
if (DataOUT[LOGIC_AND_OP] != DataOUT_copy[LOGIC_AND_OP]) {
__ERROR("ThreadReduce (AND) : " << DataOUT[LOGIC_AND_OP] << "\t"
<< DataOUT_copy[LOGIC_AND_OP]);
}
}
|
1bb42b17c0a10a30c20ab7e0495b109734995fdb.hip | // !!! This is a file automatically generated by hipify!!!
#include "layer.h"
#include "AffineTransform.h"
#include "net.h"
#include <stdlib.h>
#include <stdio.h>
#include <iomanip>
#define DEBUG 0
static void Print_matrix_to_file1(const char nmfile[], float* mat, int n, int m, int r_c){
//const char nmfile[] = "out.txt";
std::ofstream outseis(nmfile); // output, normal file
float *data_host;
data_host=(float*)malloc(n*m*sizeof(float));
hipMemcpy(data_host, mat, n*m*sizeof(float), hipMemcpyDeviceToHost); // this won't work, will throw error
if(r_c==0){
for (int jj=0; jj<n; jj++)
{
std::stringstream buf;
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+jj*m+ii);
//printf("%f ", temp);
buf << *temp <<" ";
//if(jj==101) printf("%f ", *temp);
}
outseis << buf.str() << "\n";
//printf("\n%d %d row, col", jj, ii);
}
}else{
for (int jj=0; jj<n; jj++)
{
std::stringstream buf;
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+ii*n+jj);
//printf("%f ", temp);
buf << *temp <<" ";
//if(jj==101) printf("%f ", *temp);
}
outseis << buf.str() << "\n";
//printf("\n%d %d row, col", jj, ii);
}
}
free(data_host);
}
static void Print_matrix_to_file(const char nmfile[], float* mat, int n, int m, int r_c){
//const char nmfile[] = "out.txt";
std::ofstream outseis(nmfile); // output, normal file
float *data_host;
data_host=(float*)malloc(n*m*sizeof(float));
hipMemcpy(data_host, mat, n*m*sizeof(float), hipMemcpyDeviceToHost); // this won't work, will throw error
if(r_c==0){
for (int jj=0; jj<n; jj++)
{
std::stringstream buf;
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+jj*m+ii);
//printf("%f ", temp);
buf << std::setprecision(8) << *temp <<" ";
//buf << *temp <<" ";
//if(jj==101) printf("%f ", *temp);
}
outseis << buf.str() << "\n";
//printf("\n%d %d row, col", jj, ii);
}
}else{
for (int jj=0; jj<n; jj++)
{
std::stringstream buf;
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+ii*n+jj);
//printf("%f ", temp);
buf << std::setprecision(8) << *temp <<" ";
//buf << *temp <<" ";
//if(jj==101) printf("%f ", *temp);
}
outseis << buf.str() << "\n";
//printf("\n%d %d row, col", jj, ii);
}
}
free(data_host);
}
static void Print_matrix(float* mat, int n, int m, int r_c){
//const char nmfile[] = "out.txt";
float *data_host;
data_host=(float*)malloc(n*m*sizeof(float));
hipMemcpy(data_host, mat, n*m*sizeof(float), hipMemcpyDeviceToHost); // this won't work, will throw error
if(r_c==0){
for (int jj=0; jj<n; jj++)
{
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+jj*m+ii);
printf("%.10e ", *temp);
//if(jj==101) printf("%f ", *temp);
}
printf("\n");
}
}else{
for (int jj=0; jj<n; jj++)
{
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+ii*n+jj);
printf("%.10e ", *temp);
//if(jj==101) printf("%f ", *temp);
}
printf("\n");
}
}
free(data_host);
}
int main(int argc, char* argv[]) {
int seqLength=100;
int numLayers=4;
int hiddenSize=320;
int input_dim=2*hiddenSize;
int output_dim=46;
float* x_in;
float* x_out;
float* x_in_d;
float* x_out_d;
float* x_out_soft;
if (argc == 4) {
seqLength = atoi(argv[1]);
numLayers = atoi(argv[2]);
hiddenSize = atoi(argv[3]);
}
else if (argc == 1) {
seqLength = 100;
numLayers = 4;
hiddenSize = 320;
}
//x_in = (float*)malloc(seqLength*input_dim*sizeof(float));
hipHostMalloc((void**)&x_in, seqLength*input_dim*sizeof(float)); //pinned memory
hipHostMalloc((void**)&x_out, seqLength*output_dim*sizeof(float)); //pinned memory
//x_out = (float*)malloc(seqLength*output_dim*sizeof(float));
hipMalloc((void**)&x_in_d, seqLength * input_dim * sizeof(float));
hipMalloc((void**)&x_out_d, seqLength * output_dim * sizeof(float));
hipMalloc((void**)&x_out_soft, seqLength * output_dim * sizeof(float));
//srand (time(NULL));
for(int i=0; i<seqLength * input_dim; i++){
if(((rand() % 10) % 2)==0)
x_in[i]=(rand() % 10)/10.f;
else
x_in[i]=-(rand() % 10)/10.f;
//printf("%f ", x_in[i]);
}
Net* N;
Layer* L;
AffineTransform* A;
N = new Net(4,hiddenSize);
N->Resize(seqLength);
for(int i=0; i<numLayers; i++){
if(i==0)
L = new Layer(input_dim,2*hiddenSize,hiddenSize);
else
L = new Layer(2*hiddenSize,2*hiddenSize,hiddenSize);
L->Init();
if(DEBUG){
char s1[2]="H";
char s2[5]=".txt";
char s3[5]="bias";
char s4[3]="Wx";
char s5[3]="Wh";
char pi[8]="phole_i";
char pf[8]="phole_f";
char po[8]="phole_o";
char result[19];
char result1[13];
char result2[11];
sprintf(result1,"%s%d%s",s3,i,s2);
Print_matrix_to_file(result1, L->bias_fw(), 4*hiddenSize, 1, 1);
sprintf(result2,"%s%d%s",s4,i,s2);
if(i==0)
Print_matrix_to_file(result2, L->wei_gifo_x_fw(), 4*hiddenSize, input_dim, 1);
else
Print_matrix_to_file(result2, L->wei_gifo_x_fw(), 4*hiddenSize, 2*hiddenSize, 1);
sprintf(result2,"%s%d%s",s5,i,s2);
Print_matrix_to_file(result2, L->wei_gifo_m_fw(), 4*hiddenSize, hiddenSize, 1);
sprintf(result,"%s%d%s",pi,i,s2);
Print_matrix_to_file(result, L->phole_i_c_fw(), hiddenSize, 1, 1);
sprintf(result,"%s%d%s",pf,i,s2);
Print_matrix_to_file(result, L->phole_f_c_fw(), hiddenSize, 1, 1);
sprintf(result,"%s%d%s",po,i,s2);
Print_matrix_to_file(result, L->phole_o_c_fw(), hiddenSize, 1, 1);
char s6[8]="bw.txt";
sprintf(result1,"%s%d%s",s3,i,s6);
Print_matrix_to_file(result1, L->bias_bw(), 4*hiddenSize, 1, 1);
sprintf(result2,"%s%d%s",s4,i,s6);
if(i==0)
Print_matrix_to_file(result2, L->wei_gifo_x_bw(), 4*hiddenSize, input_dim, 1);
else
Print_matrix_to_file(result2, L->wei_gifo_x_bw(), 4*hiddenSize, 2*hiddenSize, 1);
sprintf(result2,"%s%d%s",s5,i,s6);
Print_matrix_to_file(result2, L->wei_gifo_m_bw(), 4*hiddenSize, hiddenSize, 1);
sprintf(result,"%s%d%s",pi,i,s6);
Print_matrix_to_file(result, L->phole_i_c_bw(), hiddenSize, 1, 1);
sprintf(result,"%s%d%s",pf,i,s6);
Print_matrix_to_file(result, L->phole_f_c_bw(), hiddenSize, 1, 1);
sprintf(result,"%s%d%s",po,i,s6);
Print_matrix_to_file(result, L->phole_o_c_bw(), hiddenSize, 1, 1);
}
N->AppendLayer(L);
}
A = new AffineTransform(2*hiddenSize, output_dim);
A->Init();
if(DEBUG){ Print_matrix_to_file("WA.txt", A->wei_affine(), output_dim, input_dim, 1);
Print_matrix_to_file("bA.txt", A->bias(), output_dim, 1, 1);}
N->AppendAffineTransformLayer(A);
hipMemcpy( x_in_d, x_in, seqLength * input_dim *sizeof(float), hipMemcpyHostToDevice);
if(DEBUG) Print_matrix_to_file1("X.txt", x_in_d, input_dim, seqLength, 1);
hipEvent_t start, stop;
float elapsedTime=0.f;
hipblasHandle_t handle;
hipStream_t stream=NULL;
size_t dimBlock = output_dim > CU1DBLOCK ? CU1DBLOCK : output_dim;
size_t dimGrid = seqLength;
hipblasCreate(&handle);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipMemcpy( x_in_d, x_in, seqLength * input_dim *sizeof(float), hipMemcpyHostToDevice);
N->Feedforward(handle, x_in_d, x_out_d, seqLength);
softmax_reduce_w(dimGrid, dimBlock, stream, x_out_soft, x_out_d, seqLength, output_dim);
hipMemcpy( x_out, x_out_soft, seqLength * output_dim *sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("%f ", elapsedTime);
if(DEBUG){
Print_matrix_to_file("X1.txt", N->PropagateBuffer(), 2 * hiddenSize, seqLength, 1);
Print_matrix_to_file("X2.txt", N->PropagateBuffer() + 2 * hiddenSize*seqLength, 2 * hiddenSize, seqLength, 1);
Print_matrix_to_file("Xout.txt", x_out_d, output_dim, seqLength, 1);
Print_matrix_to_file("Xsoft.txt", x_out_soft, output_dim, seqLength, 1);
}
delete N;
//cudaErrCheck(hipMemcpy( devciao1, ciao1, sizeof(float), hipMemcpyHostToDevice));
}
| 1bb42b17c0a10a30c20ab7e0495b109734995fdb.cu | #include "layer.h"
#include "AffineTransform.h"
#include "net.h"
#include <stdlib.h>
#include <stdio.h>
#include <iomanip>
#define DEBUG 0
static void Print_matrix_to_file1(const char nmfile[], float* mat, int n, int m, int r_c){
//const char nmfile[] = "out.txt";
std::ofstream outseis(nmfile); // output, normal file
float *data_host;
data_host=(float*)malloc(n*m*sizeof(float));
cudaMemcpy(data_host, mat, n*m*sizeof(float), cudaMemcpyDeviceToHost); // this won't work, will throw error
if(r_c==0){
for (int jj=0; jj<n; jj++)
{
std::stringstream buf;
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+jj*m+ii);
//printf("%f ", temp);
buf << *temp <<" ";
//if(jj==101) printf("%f ", *temp);
}
outseis << buf.str() << "\n";
//printf("\n%d %d row, col", jj, ii);
}
}else{
for (int jj=0; jj<n; jj++)
{
std::stringstream buf;
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+ii*n+jj);
//printf("%f ", temp);
buf << *temp <<" ";
//if(jj==101) printf("%f ", *temp);
}
outseis << buf.str() << "\n";
//printf("\n%d %d row, col", jj, ii);
}
}
free(data_host);
}
static void Print_matrix_to_file(const char nmfile[], float* mat, int n, int m, int r_c){
//const char nmfile[] = "out.txt";
std::ofstream outseis(nmfile); // output, normal file
float *data_host;
data_host=(float*)malloc(n*m*sizeof(float));
cudaMemcpy(data_host, mat, n*m*sizeof(float), cudaMemcpyDeviceToHost); // this won't work, will throw error
if(r_c==0){
for (int jj=0; jj<n; jj++)
{
std::stringstream buf;
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+jj*m+ii);
//printf("%f ", temp);
buf << std::setprecision(8) << *temp <<" ";
//buf << *temp <<" ";
//if(jj==101) printf("%f ", *temp);
}
outseis << buf.str() << "\n";
//printf("\n%d %d row, col", jj, ii);
}
}else{
for (int jj=0; jj<n; jj++)
{
std::stringstream buf;
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+ii*n+jj);
//printf("%f ", temp);
buf << std::setprecision(8) << *temp <<" ";
//buf << *temp <<" ";
//if(jj==101) printf("%f ", *temp);
}
outseis << buf.str() << "\n";
//printf("\n%d %d row, col", jj, ii);
}
}
free(data_host);
}
static void Print_matrix(float* mat, int n, int m, int r_c){
//const char nmfile[] = "out.txt";
float *data_host;
data_host=(float*)malloc(n*m*sizeof(float));
cudaMemcpy(data_host, mat, n*m*sizeof(float), cudaMemcpyDeviceToHost); // this won't work, will throw error
if(r_c==0){
for (int jj=0; jj<n; jj++)
{
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+jj*m+ii);
printf("%.10e ", *temp);
//if(jj==101) printf("%f ", *temp);
}
printf("\n");
}
}else{
for (int jj=0; jj<n; jj++)
{
int ii;
for (ii=0; ii<m; ii++)
{
float* temp=(float *)(data_host+ii*n+jj);
printf("%.10e ", *temp);
//if(jj==101) printf("%f ", *temp);
}
printf("\n");
}
}
free(data_host);
}
int main(int argc, char* argv[]) {
int seqLength=100;
int numLayers=4;
int hiddenSize=320;
int input_dim=2*hiddenSize;
int output_dim=46;
float* x_in;
float* x_out;
float* x_in_d;
float* x_out_d;
float* x_out_soft;
if (argc == 4) {
seqLength = atoi(argv[1]);
numLayers = atoi(argv[2]);
hiddenSize = atoi(argv[3]);
}
else if (argc == 1) {
seqLength = 100;
numLayers = 4;
hiddenSize = 320;
}
//x_in = (float*)malloc(seqLength*input_dim*sizeof(float));
cudaMallocHost((void**)&x_in, seqLength*input_dim*sizeof(float)); //pinned memory
cudaMallocHost((void**)&x_out, seqLength*output_dim*sizeof(float)); //pinned memory
//x_out = (float*)malloc(seqLength*output_dim*sizeof(float));
cudaMalloc((void**)&x_in_d, seqLength * input_dim * sizeof(float));
cudaMalloc((void**)&x_out_d, seqLength * output_dim * sizeof(float));
cudaMalloc((void**)&x_out_soft, seqLength * output_dim * sizeof(float));
//srand (time(NULL));
for(int i=0; i<seqLength * input_dim; i++){
if(((rand() % 10) % 2)==0)
x_in[i]=(rand() % 10)/10.f;
else
x_in[i]=-(rand() % 10)/10.f;
//printf("%f ", x_in[i]);
}
Net* N;
Layer* L;
AffineTransform* A;
N = new Net(4,hiddenSize);
N->Resize(seqLength);
for(int i=0; i<numLayers; i++){
if(i==0)
L = new Layer(input_dim,2*hiddenSize,hiddenSize);
else
L = new Layer(2*hiddenSize,2*hiddenSize,hiddenSize);
L->Init();
if(DEBUG){
char s1[2]="H";
char s2[5]=".txt";
char s3[5]="bias";
char s4[3]="Wx";
char s5[3]="Wh";
char pi[8]="phole_i";
char pf[8]="phole_f";
char po[8]="phole_o";
char result[19];
char result1[13];
char result2[11];
sprintf(result1,"%s%d%s",s3,i,s2);
Print_matrix_to_file(result1, L->bias_fw(), 4*hiddenSize, 1, 1);
sprintf(result2,"%s%d%s",s4,i,s2);
if(i==0)
Print_matrix_to_file(result2, L->wei_gifo_x_fw(), 4*hiddenSize, input_dim, 1);
else
Print_matrix_to_file(result2, L->wei_gifo_x_fw(), 4*hiddenSize, 2*hiddenSize, 1);
sprintf(result2,"%s%d%s",s5,i,s2);
Print_matrix_to_file(result2, L->wei_gifo_m_fw(), 4*hiddenSize, hiddenSize, 1);
sprintf(result,"%s%d%s",pi,i,s2);
Print_matrix_to_file(result, L->phole_i_c_fw(), hiddenSize, 1, 1);
sprintf(result,"%s%d%s",pf,i,s2);
Print_matrix_to_file(result, L->phole_f_c_fw(), hiddenSize, 1, 1);
sprintf(result,"%s%d%s",po,i,s2);
Print_matrix_to_file(result, L->phole_o_c_fw(), hiddenSize, 1, 1);
char s6[8]="bw.txt";
sprintf(result1,"%s%d%s",s3,i,s6);
Print_matrix_to_file(result1, L->bias_bw(), 4*hiddenSize, 1, 1);
sprintf(result2,"%s%d%s",s4,i,s6);
if(i==0)
Print_matrix_to_file(result2, L->wei_gifo_x_bw(), 4*hiddenSize, input_dim, 1);
else
Print_matrix_to_file(result2, L->wei_gifo_x_bw(), 4*hiddenSize, 2*hiddenSize, 1);
sprintf(result2,"%s%d%s",s5,i,s6);
Print_matrix_to_file(result2, L->wei_gifo_m_bw(), 4*hiddenSize, hiddenSize, 1);
sprintf(result,"%s%d%s",pi,i,s6);
Print_matrix_to_file(result, L->phole_i_c_bw(), hiddenSize, 1, 1);
sprintf(result,"%s%d%s",pf,i,s6);
Print_matrix_to_file(result, L->phole_f_c_bw(), hiddenSize, 1, 1);
sprintf(result,"%s%d%s",po,i,s6);
Print_matrix_to_file(result, L->phole_o_c_bw(), hiddenSize, 1, 1);
}
N->AppendLayer(L);
}
A = new AffineTransform(2*hiddenSize, output_dim);
A->Init();
if(DEBUG){ Print_matrix_to_file("WA.txt", A->wei_affine(), output_dim, input_dim, 1);
Print_matrix_to_file("bA.txt", A->bias(), output_dim, 1, 1);}
N->AppendAffineTransformLayer(A);
cudaMemcpy( x_in_d, x_in, seqLength * input_dim *sizeof(float), cudaMemcpyHostToDevice);
if(DEBUG) Print_matrix_to_file1("X.txt", x_in_d, input_dim, seqLength, 1);
cudaEvent_t start, stop;
float elapsedTime=0.f;
cublasHandle_t handle;
cudaStream_t stream=NULL;
size_t dimBlock = output_dim > CU1DBLOCK ? CU1DBLOCK : output_dim;
size_t dimGrid = seqLength;
cublasCreate(&handle);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaMemcpy( x_in_d, x_in, seqLength * input_dim *sizeof(float), cudaMemcpyHostToDevice);
N->Feedforward(handle, x_in_d, x_out_d, seqLength);
softmax_reduce_w(dimGrid, dimBlock, stream, x_out_soft, x_out_d, seqLength, output_dim);
cudaMemcpy( x_out, x_out_soft, seqLength * output_dim *sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("%f ", elapsedTime);
if(DEBUG){
Print_matrix_to_file("X1.txt", N->PropagateBuffer(), 2 * hiddenSize, seqLength, 1);
Print_matrix_to_file("X2.txt", N->PropagateBuffer() + 2 * hiddenSize*seqLength, 2 * hiddenSize, seqLength, 1);
Print_matrix_to_file("Xout.txt", x_out_d, output_dim, seqLength, 1);
Print_matrix_to_file("Xsoft.txt", x_out_soft, output_dim, seqLength, 1);
}
delete N;
//cudaErrCheck(cudaMemcpy( devciao1, ciao1, sizeof(float), cudaMemcpyHostToDevice));
}
|
5e03a064c1c4d4d2523b445242b7810a47a36592.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// copy_float1 copy_float2 copy_float3 copy_float4 copy_float5
// copy_double1 copy_double2 copy_double3 copy_double4 copy_double5
#include "array.h"
template<typename T, int N>
__device__ void copy(Array<T,N> &dest, Array<T,N> &src) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= dest.length()) return;
dest(idx) = src(idx);
}
#define COPY_CAPI(T,N) \
__global__ void copy ## _ ## T ## N(Array<T,N> dest, Array<T,N> src) { \
int idx = threadIdx.x + blockIdx.x * blockDim.x; \
if (idx >= dest.length()) return; \
dest(idx) = src(idx); \
}
extern "C" {
COPY_CAPI(float,1)
COPY_CAPI(float,2)
COPY_CAPI(float,3)
COPY_CAPI(float,4)
COPY_CAPI(float,5)
COPY_CAPI(double,1)
COPY_CAPI(double,2)
COPY_CAPI(double,3)
COPY_CAPI(double,4)
COPY_CAPI(double,5)
}
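// For T = float and N = 2 the macro above token-pastes the name copy_float2,
// a C-linkage kernel. A hedged host-side sketch (n standing for dest.length(),
// which is not defined in this file) of launching it:
// hipLaunchKernelGGL(copy_float2, dim3((n + 255) / 256), dim3(256), 0, 0, dest, src);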
/*
@nvrtc """
extern "C" __global__ void $(op)_$T(Array<$T,1> x, Array<$T,1> y) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < y.length()) y[idx] = $(op)(x[idx]);
}
extern "C" __global__ void $(op)_T(Array<$T,1> x1, Array<$T,1> x2, Array<$T,1> y) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= y.length()) return;
int idx_x1 = idx < x1.length() ? idx : idx % x1.length();
int idx_x2 = idx < x2.length() ? idx : idx % x2.length();
y(idx) = x1[idx_x1] $op x2[idx_x2];
}
"""
@nvrtc """
extern "C" __global__ void copy1d(Array<$T,1> dest, Array<$T,1> src) {
int idx0 = threadIdx.x + blockIdx.x * blockDim.x;
if (idx0 >= src.dims[0]) return;
dest(idx0) = src(idx0);
}
extern "C" __global__ void copy2d(Array<$T,2> dest, Array<$T,2> src) {
int idx0 = threadIdx.x + blockIdx.x * blockDim.x;
int idx1 = threadIdx.y + blockIdx.y * blockDim.y;
if (idx0 >= src.dims[0] || idx1 >= src.dims[1]) return;
dest(idx0,idx1) = src(idx0,idx1);
}
extern "C" __global__ void copy3d(Array<$T,3> dest, Array<$T,3> src) {
int idx0 = threadIdx.x + blockIdx.x * blockDim.x;
int idx1 = threadIdx.y + blockIdx.y * blockDim.y;
int idx2 = threadIdx.z + blockIdx.z * blockDim.z;
if (idx0 >= src.dims[0] || idx1 >= src.dims[1] || idx2 >= src.dims[2]) return;
dest(idx0,idx1,idx2) = src(idx0,idx1,idx2);
}
extern "C" __global__ void copynd(Array<$T,$N> dest, Array<$T,$N> src) {
}
*/
| 5e03a064c1c4d4d2523b445242b7810a47a36592.cu | // copy_float1 copy_float2 copy_float3 copy_float4 copy_float5
// copy_double1 copy_double2 copy_double3 copy_double4 copy_double5
#include "array.h"
template<typename T, int N>
__device__ void copy(Array<T,N> &dest, Array<T,N> &src) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= dest.length()) return;
dest(idx) = src(idx);
}
#define COPY_CAPI(T,N) \
__global__ void copy ## _ ## T ## N(Array<T,N> dest, Array<T,N> src) { \
int idx = threadIdx.x + blockIdx.x * blockDim.x; \
if (idx >= dest.length()) return; \
dest(idx) = src(idx); \
}
extern "C" {
COPY_CAPI(float,1)
COPY_CAPI(float,2)
COPY_CAPI(float,3)
COPY_CAPI(float,4)
COPY_CAPI(float,5)
COPY_CAPI(double,1)
COPY_CAPI(double,2)
COPY_CAPI(double,3)
COPY_CAPI(double,4)
COPY_CAPI(double,5)
}
/*
@nvrtc """
extern "C" __global__ void $(op)_$T(Array<$T,1> x, Array<$T,1> y) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < y.length()) y[idx] = $(op)(x[idx]);
}
extern "C" __global__ void $(op)_T(Array<$T,1> x1, Array<$T,1> x2, Array<$T,1> y) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= y.length()) return;
int idx_x1 = idx < x1.length() ? idx : idx % x1.length();
int idx_x2 = idx < x2.length() ? idx : idx % x2.length();
y(idx) = x1[idx_x1] $op x2[idx_x2];
}
"""
@nvrtc """
extern "C" __global__ void copy1d(Array<$T,1> dest, Array<$T,1> src) {
int idx0 = threadIdx.x + blockIdx.x * blockDim.x;
if (idx0 >= src.dims[0]) return;
dest(idx0) = src(idx0);
}
extern "C" __global__ void copy2d(Array<$T,2> dest, Array<$T,2> src) {
int idx0 = threadIdx.x + blockIdx.x * blockDim.x;
int idx1 = threadIdx.y + blockIdx.y * blockDim.y;
if (idx0 >= src.dims[0] || idx1 >= src.dims[1]) return;
dest(idx0,idx1) = src(idx0,idx1);
}
extern "C" __global__ void copy3d(Array<$T,3> dest, Array<$T,3> src) {
int idx0 = threadIdx.x + blockIdx.x * blockDim.x;
int idx1 = threadIdx.y + blockIdx.y * blockDim.y;
int idx2 = threadIdx.z + blockIdx.z * blockDim.z;
if (idx0 >= src.dims[0] || idx1 >= src.dims[1] || idx2 >= src.dims[2]) return;
dest(idx0,idx1,idx2) = src(idx0,idx1,idx2);
}
extern "C" __global__ void copynd(Array<$T,$N> dest, Array<$T,$N> src) {
}
*/
|
c2765cea454740874452c8b7e412b14d82fa4f30.hip | // !!! This is a file automatically generated by hipify!!!
#include "ndt_gpu/SymmetricEigenSolver.h"
#include "ndt_gpu/debug.h"
namespace gpu {
SymmetricEigensolver3x3::SymmetricEigensolver3x3(int offset)
{
offset_ = offset;
checkCudaErrors(hipMalloc(&buffer_, sizeof(double) * 18 * offset_));
checkCudaErrors(hipMalloc(&maxAbsElement_, sizeof(double) * offset_));
checkCudaErrors(hipMalloc(&norm_, sizeof(double) * offset_));
checkCudaErrors(hipMalloc(&i02_, sizeof(int) * 2 * offset_));
eigenvectors_ = NULL;
eigenvalues_ = NULL;
input_matrices_ = NULL;
is_copied_ = false;
}
void SymmetricEigensolver3x3::setInputMatrices(double *input_matrices)
{
input_matrices_ = input_matrices;
}
void SymmetricEigensolver3x3::setEigenvectors(double *eigenvectors)
{
eigenvectors_ = eigenvectors;
}
void SymmetricEigensolver3x3::setEigenvalues(double *eigenvalues)
{
eigenvalues_ = eigenvalues;
}
double* SymmetricEigensolver3x3::getBuffer() const
{
return buffer_;
}
void SymmetricEigensolver3x3::memFree()
{
if (!is_copied_) {
if (buffer_ != NULL) {
checkCudaErrors(hipFree(buffer_));
buffer_ = NULL;
}
if (maxAbsElement_ != NULL) {
checkCudaErrors(hipFree(maxAbsElement_));
maxAbsElement_ = NULL;
}
if (norm_ != NULL) {
checkCudaErrors(hipFree(norm_));
norm_ = NULL;
}
if (i02_ != NULL) {
checkCudaErrors(hipFree(i02_));
i02_ = NULL;
}
}
}
}
| c2765cea454740874452c8b7e412b14d82fa4f30.cu | #include "ndt_gpu/SymmetricEigenSolver.h"
#include "ndt_gpu/debug.h"
namespace gpu {
SymmetricEigensolver3x3::SymmetricEigensolver3x3(int offset)
{
offset_ = offset;
checkCudaErrors(cudaMalloc(&buffer_, sizeof(double) * 18 * offset_));
checkCudaErrors(cudaMalloc(&maxAbsElement_, sizeof(double) * offset_));
checkCudaErrors(cudaMalloc(&norm_, sizeof(double) * offset_));
checkCudaErrors(cudaMalloc(&i02_, sizeof(int) * 2 * offset_));
eigenvectors_ = NULL;
eigenvalues_ = NULL;
input_matrices_ = NULL;
is_copied_ = false;
}
void SymmetricEigensolver3x3::setInputMatrices(double *input_matrices)
{
input_matrices_ = input_matrices;
}
void SymmetricEigensolver3x3::setEigenvectors(double *eigenvectors)
{
eigenvectors_ = eigenvectors;
}
void SymmetricEigensolver3x3::setEigenvalues(double *eigenvalues)
{
eigenvalues_ = eigenvalues;
}
double* SymmetricEigensolver3x3::getBuffer() const
{
return buffer_;
}
void SymmetricEigensolver3x3::memFree()
{
if (!is_copied_) {
if (buffer_ != NULL) {
checkCudaErrors(cudaFree(buffer_));
buffer_ = NULL;
}
if (maxAbsElement_ != NULL) {
checkCudaErrors(cudaFree(maxAbsElement_));
maxAbsElement_ = NULL;
}
if (norm_ != NULL) {
checkCudaErrors(cudaFree(norm_));
norm_ = NULL;
}
if (i02_ != NULL) {
checkCudaErrors(cudaFree(i02_));
i02_ = NULL;
}
}
}
}
|
facfc3a207f46277d854d6a883c481040fe4ed21.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename T>
__device__ __forceinline__ bool is_aligned(T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
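// A concrete reading of the two helpers above: for T = float and ILP = 4, LT is
// a 16-byte, 16-byte-aligned type, so one load_store call moves four floats as
// a single vectorized transaction, and is_aligned checks the 16-byte alignment
// that makes this fast path legal.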
template <typename FROM_T, typename TO_T>
__device__ void convert(const FROM_T vi, TO_T& vo)
{
vo = static_cast<TO_T>(vi);
}
template <>
__device__ void convert(const float vi, uint8_t& vo)
{
union S
{
float as_float;
int as_int;
};
S s;
s.as_float = vi;
s.as_int = s.as_int & 0xFF800000;
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
vo = t.as_byte[1];
}
template <>
__device__ void convert(const uint8_t vi, float& vo)
{
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_byte[0] = 0;
t.as_byte[1] = vi;
vo = static_cast<float>(t.as_half);
}
template <>
__device__ void convert(const at::Half vi, uint8_t& vo)
{
union S
{
float as_float;
int as_int;
};
S s;
s.as_float = static_cast<float>(vi);
s.as_int = s.as_int & 0xFF800000;
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
vo = t.as_byte[1];
}
template <>
__device__ void convert(const uint8_t vi, at::Half& vo)
{
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_byte[0] = 0;
t.as_byte[1] = vi;
vo = t.as_half;
}
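// Editor's note (hedged, not part of the original source): the uint8_t conversions above keep
// only the high byte of the fp16 encoding (sign + 5 exponent bits + 2 mantissa bits) on
// little-endian GPUs, and reconstruct by zeroing the low byte. s.as_float is the input with
// its float mantissa masked off (i.e. +/-2^e), so s.as_float/8 = 2^(e-3) is half of the
// 2-bit-mantissa ULP (2^(e-2)); adding it before truncation turns the byte extraction into
// round-to-nearest. Worked example: 1.4f -> half(1.4) = 0x3D9A truncates to byte 0x3D (= 1.25),
// but half(1.4 + 0.125) = 0x3E1A stores byte 0x3E, which decodes back to 1.5 -- the nearest
// value representable with 2 mantissa bits.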
typedef enum{
MOMENT_MODE_0 =0, // L2 regularization mode
MOMENT_MODE_1 =1 // Decoupled weight decay mode
} adamMode_t;
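// Editor's note (hedged, not part of the original source): the two modes differ only in where
// weight decay enters the per-element loops below --
// MOMENT_MODE_0 (L2): g' = g/scale + decay*p, Adam moments of g', update = m_hat/(sqrt(v_hat)+eps)
// MOMENT_MODE_1 (decoupled): Adam moments of g/scale, update = m_hat/(sqrt(v_hat)+eps) + decay*p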
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage1Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<5>& tl,
const MATH_T* per_tensor_beta1,
const MATH_T* per_tensor_beta2,
const MATH_T* per_tensor_beta3,
const int* per_tensor_bias_correction,
const int* step,
const MATH_T* per_tensor_epsilon,
adamMode_t mode,
const MATH_T* per_tensor_decay,
const MATH_T* global_scale,
const MATH_T* global_grad_norm,
const float max_grad_norm)
{
// I'd like this kernel to propagate infs/nans.
if (*noop_gmem == 1)
return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
float combined_scale = *global_scale;
if (max_grad_norm > 0) {
combined_scale = max_grad_norm / (*global_grad_norm / *global_scale + 1e-6);
combined_scale = *global_scale / ::min((float) 1.0, combined_scale);
}
MATH_T beta1 = per_tensor_beta1[tensor_num];
MATH_T beta2 = per_tensor_beta2[tensor_num];
MATH_T beta3 = 1 - beta1;
MATH_T beta1_correction, beta2_correction;
if (per_tensor_bias_correction[tensor_num] == 1) {
beta1_correction = 1 - pow(beta1, *step);
beta2_correction = 1 - pow(beta2, *step);
} else {
beta1_correction = (MATH_T) 1.0;
beta2_correction = (MATH_T) 1.0;
}
MATH_T epsilon = per_tensor_epsilon[tensor_num];
MATH_T decay = per_tensor_decay[tensor_num];
GRAD_T* g = (GRAD_T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
MATH_T* u = (MATH_T*)tl.addresses[4][tensor_loc];
u += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 &&
chunk_size % ILP == 0 &&
is_aligned(g) &&
is_aligned(p) &&
is_aligned(m) &&
is_aligned(v))
{
GRAD_T l_g[ILP];
T l_p[ILP];
T l_m[ILP];
T l_v[ILP];
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(l_g, g, 0, i_start);
if (decay != 0)
load_store(l_p, p, 0, i_start);
load_store(l_m, m, 0, i_start);
load_store(l_v, v, 0, i_start);
// unpack
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_g[ii] = l_g[ii];
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = l_p[ii];
}
r_m[ii] = l_m[ii];
r_v[ii] = l_v[ii];
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if (mode == MOMENT_MODE_0) {
MATH_T scaled_grad = r_g[ii] / combined_scale;
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
}
else {
MATH_T scaled_grad = r_g[ii] / combined_scale;
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
l_m[ii] = r_m[ii];
l_v[ii] = r_v[ii];
}
// store
load_store(u, r_p, i_start, 0);
load_store(m, l_m, i_start, 0);
load_store(v, l_v, i_start, 0);
}
}
else
{
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_g[ii] = g[i];
// special ?optimization? for lamb stage 1
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = p[i];
}
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if (mode == MOMENT_MODE_0) {
MATH_T scaled_grad = r_g[ii] / combined_scale;
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
}
else {
MATH_T scaled_grad = r_g[ii] / combined_scale;
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
u[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
}
};
// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value.
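// Editor's note (hedged summary of the functor below, not part of the original source): the
// per-tensor trust ratio is ratio = lr * ||p|| / ||update|| when both norms are non-zero (and
// either use_nvlamb is set or the tensor has non-zero weight decay), falling back to plain lr
// otherwise; the weight update is then p <- p - ratio * update, with a low-precision shadow
// copy written to p_copy via convert().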
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage2Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
const MATH_T* per_tensor_param_norm,
const MATH_T* per_tensor_update_norm,
const long* update_norm_offset,
const MATH_T* learning_rate,
const MATH_T* per_tensor_decay,
const MATH_T* global_grad_norm,
bool use_nvlamb)
{
// I'd like this kernel to propagate infs/nans.
if (*noop_gmem == 1)
return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
MATH_T decay = per_tensor_decay[tensor_num];
MATH_T ratio = *learning_rate;
// nvlamb: apply adaptive learning rate to all parameters
// otherwise, only apply to those with non-zero weight decay
if (use_nvlamb || (decay != (MATH_T) 0.0))
{
MATH_T param_norm = per_tensor_param_norm[tensor_num];
MATH_T update_norm = per_tensor_update_norm[update_norm_offset[tensor_num]];
ratio = (update_norm != 0.0 && param_norm != 0.0) ? (*learning_rate) * (param_norm / update_norm) : (*learning_rate);
}
MATH_T* update = (MATH_T*)tl.addresses[0][tensor_loc];
update += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
GRAD_T* p_copy = (GRAD_T*)tl.addresses[2][tensor_loc];
p_copy += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 &&
chunk_size % ILP == 0 &&
is_aligned(p) &&
is_aligned(update))
{
T r_p[ILP];
MATH_T r_update[ILP];
GRAD_T r_p_copy[ILP];
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(r_p, p, 0, i_start);
load_store(r_update, update, 0, i_start);
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = static_cast<MATH_T>(r_p[ii]) - (ratio * r_update[ii]);
convert(r_p[ii], r_p_copy[ii]);
}
load_store(p, r_p, i_start, 0);
load_store(p_copy, r_p_copy, i_start, 0);
}
}
else
{
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_p[ILP];
MATH_T r_update[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_p[ii] = p[i];
r_update[ii] = update[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = r_p[ii] - (ratio * r_update[ii]);
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
convert(r_p[ii], p_copy[i]);
}
}
}
}
}
};
void multi_tensor_lamb_compute_update_term_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_beta1,
at::Tensor per_tensor_beta2,
at::Tensor per_tensor_beta3,
at::Tensor per_tensor_bias_correction,
at::Tensor step,
at::Tensor per_tensor_epsilon,
const int mode,
at::Tensor per_tensor_decay,
at::Tensor global_scale,
at::Tensor global_grad_norm,
const float max_grad_norm)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_1",
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 1, "lamb_stage_1",
DISPATCH_FLOAT_AND_HALF(tensor_lists[4][0].scalar_type(), 2, "lamb_stage_1",
multi_tensor_apply<5>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistOptLAMBStage1Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
per_tensor_beta1.DATA_PTR<scalar_t_2>(),
per_tensor_beta2.DATA_PTR<scalar_t_2>(),
per_tensor_beta3.DATA_PTR<scalar_t_2>(),
per_tensor_bias_correction.DATA_PTR<int>(),
step.DATA_PTR<int>(),
per_tensor_epsilon.DATA_PTR<scalar_t_2>(),
(adamMode_t) mode,
per_tensor_decay.DATA_PTR<scalar_t_2>(),
global_scale.DATA_PTR<scalar_t_2>(),
global_grad_norm.DATA_PTR<scalar_t_2>(),
max_grad_norm); )))
AT_CUDA_CHECK(hipGetLastError());
}
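// Editor's note (hedged, not part of the original source): as indexed by DistOptLAMBStage1Functor
// above, tensor_lists is expected to hold {0: grads, 1: params, 2: exp_avg m, 3: exp_avg_sq v,
// 4: update u}; the three DISPATCH macros therefore take T from the params list, GRAD_T from
// the grads list and MATH_T from the update list.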
void multi_tensor_lamb_update_weights_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_param_norm,
at::Tensor per_tensor_update_norm,
at::Tensor update_norm_offset,
at::Tensor learning_rate,
at::Tensor per_tensor_decay,
at::Tensor global_grad_norm,
bool use_nvlamb)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_2",
DISPATCH_FLOAT_HALF_AND_BYTE(tensor_lists[2][0].scalar_type(), 1, "lamb_stage_2",
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 2, "lamb_stage_2",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistOptLAMBStage2Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
per_tensor_param_norm.DATA_PTR<scalar_t_2>(),
per_tensor_update_norm.DATA_PTR<scalar_t_2>(),
update_norm_offset.DATA_PTR<long>(),
learning_rate.DATA_PTR<scalar_t_2>(),
per_tensor_decay.DATA_PTR<scalar_t_2>(),
global_grad_norm.DATA_PTR<scalar_t_2>(),
use_nvlamb); )))
AT_CUDA_CHECK(hipGetLastError());
}
| facfc3a207f46277d854d6a883c481040fe4ed21.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename T>
__device__ __forceinline__ bool is_aligned(T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
template <typename FROM_T, typename TO_T>
__device__ void convert(const FROM_T vi, TO_T& vo)
{
vo = static_cast<TO_T>(vi);
}
template <>
__device__ void convert(const float vi, uint8_t& vo)
{
union S
{
float as_float;
int as_int;
};
S s;
s.as_float = vi;
s.as_int = s.as_int & 0xFF800000;
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
vo = t.as_byte[1];
}
template <>
__device__ void convert(const uint8_t vi, float& vo)
{
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_byte[0] = 0;
t.as_byte[1] = vi;
vo = static_cast<float>(t.as_half);
}
template <>
__device__ void convert(const at::Half vi, uint8_t& vo)
{
union S
{
float as_float;
int as_int;
};
S s;
s.as_float = static_cast<float>(vi);
s.as_int = s.as_int & 0xFF800000;
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f);
vo = t.as_byte[1];
}
template <>
__device__ void convert(const uint8_t vi, at::Half& vo)
{
union T
{
at::Half as_half;
uint8_t as_byte[2];
};
T t;
t.as_byte[0] = 0;
t.as_byte[1] = vi;
vo = t.as_half;
}
typedef enum{
MOMENT_MODE_0 =0, // L2 regularization mode
MOMENT_MODE_1 =1 // Decoupled weight decay mode
} adamMode_t;
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage1Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<5>& tl,
const MATH_T* per_tensor_beta1,
const MATH_T* per_tensor_beta2,
const MATH_T* per_tensor_beta3,
const int* per_tensor_bias_correction,
const int* step,
const MATH_T* per_tensor_epsilon,
adamMode_t mode,
const MATH_T* per_tensor_decay,
const MATH_T* global_scale,
const MATH_T* global_grad_norm,
const float max_grad_norm)
{
// I'd like this kernel to propagate infs/nans.
if (*noop_gmem == 1)
return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
float combined_scale = *global_scale;
if (max_grad_norm > 0) {
combined_scale = max_grad_norm / (*global_grad_norm / *global_scale + 1e-6);
combined_scale = *global_scale / std::min((float) 1.0, combined_scale);
}
MATH_T beta1 = per_tensor_beta1[tensor_num];
MATH_T beta2 = per_tensor_beta2[tensor_num];
MATH_T beta3 = 1 - beta1;
MATH_T beta1_correction, beta2_correction;
if (per_tensor_bias_correction[tensor_num] == 1) {
beta1_correction = 1 - pow(beta1, *step);
beta2_correction = 1 - pow(beta2, *step);
} else {
beta1_correction = (MATH_T) 1.0;
beta2_correction = (MATH_T) 1.0;
}
MATH_T epsilon = per_tensor_epsilon[tensor_num];
MATH_T decay = per_tensor_decay[tensor_num];
GRAD_T* g = (GRAD_T*)tl.addresses[0][tensor_loc];
g += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
T* m = (T*)tl.addresses[2][tensor_loc];
m += chunk_idx*chunk_size;
T* v = (T*)tl.addresses[3][tensor_loc];
v += chunk_idx*chunk_size;
MATH_T* u = (MATH_T*)tl.addresses[4][tensor_loc];
u += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 &&
chunk_size % ILP == 0 &&
is_aligned(g) &&
is_aligned(p) &&
is_aligned(m) &&
is_aligned(v))
{
GRAD_T l_g[ILP];
T l_p[ILP];
T l_m[ILP];
T l_v[ILP];
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(l_g, g, 0, i_start);
if (decay != 0)
load_store(l_p, p, 0, i_start);
load_store(l_m, m, 0, i_start);
load_store(l_v, v, 0, i_start);
// unpack
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_g[ii] = l_g[ii];
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = l_p[ii];
}
r_m[ii] = l_m[ii];
r_v[ii] = l_v[ii];
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if (mode == MOMENT_MODE_0) {
MATH_T scaled_grad = r_g[ii] / combined_scale;
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
}
else {
MATH_T scaled_grad = r_g[ii] / combined_scale;
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
l_m[ii] = r_m[ii];
l_v[ii] = r_v[ii];
}
// store
load_store(u, r_p, i_start, 0);
load_store(m, l_m, i_start, 0);
load_store(v, l_v, i_start, 0);
}
}
else
{
// see note in multi_tensor_scale_kernel.cu
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_g[ii] = g[i];
// special ?optimization? for lamb stage 1
if (decay == 0) {
r_p[ii] = MATH_T(0);
}
else {
r_p[ii] = p[i];
}
r_m[ii] = m[i];
r_v[ii] = v[i];
} else {
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
if (mode == MOMENT_MODE_0) {
MATH_T scaled_grad = r_g[ii] / combined_scale;
// L2 on scaled grad
scaled_grad = scaled_grad + decay*r_p[ii];
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = next_m_unbiased / denom;
}
else {
MATH_T scaled_grad = r_g[ii] / combined_scale;
r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad;
r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad;
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]);
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
u[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
}
};
// Step 2 reads in 'update' value and per-tensor param_norm and update_norm.
// It computes new parameter value.
template<typename T, typename GRAD_T, typename MATH_T>
struct DistOptLAMBStage2Functor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
const MATH_T* per_tensor_param_norm,
const MATH_T* per_tensor_update_norm,
const long* update_norm_offset,
const MATH_T* learning_rate,
const MATH_T* per_tensor_decay,
const MATH_T* global_grad_norm,
bool use_nvlamb)
{
// I'd like this kernel to propagate infs/nans.
if (*noop_gmem == 1)
return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
MATH_T decay = per_tensor_decay[tensor_num];
MATH_T ratio = *learning_rate;
// nvlamb: apply adaptive learning rate to all parameters
// otherwise, only apply to those with non-zero weight decay
if (use_nvlamb || (decay != (MATH_T) 0.0))
{
MATH_T param_norm = per_tensor_param_norm[tensor_num];
MATH_T update_norm = per_tensor_update_norm[update_norm_offset[tensor_num]];
ratio = (update_norm != 0.0 && param_norm != 0.0) ? (*learning_rate) * (param_norm / update_norm) : (*learning_rate);
}
MATH_T* update = (MATH_T*)tl.addresses[0][tensor_loc];
update += chunk_idx*chunk_size;
T* p = (T*)tl.addresses[1][tensor_loc];
p += chunk_idx*chunk_size;
GRAD_T* p_copy = (GRAD_T*)tl.addresses[2][tensor_loc];
p_copy += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 &&
chunk_size % ILP == 0 &&
is_aligned(p) &&
is_aligned(update))
{
T r_p[ILP];
MATH_T r_update[ILP];
GRAD_T r_p_copy[ILP];
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(r_p, p, 0, i_start);
load_store(r_update, update, 0, i_start);
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = static_cast<MATH_T>(r_p[ii]) - (ratio * r_update[ii]);
convert(r_p[ii], r_p_copy[ii]);
}
load_store(p, r_p, i_start, 0);
load_store(p_copy, r_p_copy, i_start, 0);
}
}
else
{
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
MATH_T r_p[ILP];
MATH_T r_update[ILP];
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_p[ii] = p[i];
r_update[ii] = update[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_p[ii] = r_p[ii] - (ratio * r_update[ii]);
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
p[i] = r_p[ii];
convert(r_p[ii], p_copy[i]);
}
}
}
}
}
};
void multi_tensor_lamb_compute_update_term_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_beta1,
at::Tensor per_tensor_beta2,
at::Tensor per_tensor_beta3,
at::Tensor per_tensor_bias_correction,
at::Tensor step,
at::Tensor per_tensor_epsilon,
const int mode,
at::Tensor per_tensor_decay,
at::Tensor global_scale,
at::Tensor global_grad_norm,
const float max_grad_norm)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_1",
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 1, "lamb_stage_1",
DISPATCH_FLOAT_AND_HALF(tensor_lists[4][0].scalar_type(), 2, "lamb_stage_1",
multi_tensor_apply<5>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistOptLAMBStage1Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
per_tensor_beta1.DATA_PTR<scalar_t_2>(),
per_tensor_beta2.DATA_PTR<scalar_t_2>(),
per_tensor_beta3.DATA_PTR<scalar_t_2>(),
per_tensor_bias_correction.DATA_PTR<int>(),
step.DATA_PTR<int>(),
per_tensor_epsilon.DATA_PTR<scalar_t_2>(),
(adamMode_t) mode,
per_tensor_decay.DATA_PTR<scalar_t_2>(),
global_scale.DATA_PTR<scalar_t_2>(),
global_grad_norm.DATA_PTR<scalar_t_2>(),
max_grad_norm); )))
AT_CUDA_CHECK(cudaGetLastError());
}
void multi_tensor_lamb_update_weights_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
at::Tensor per_tensor_param_norm,
at::Tensor per_tensor_update_norm,
at::Tensor update_norm_offset,
at::Tensor learning_rate,
at::Tensor per_tensor_decay,
at::Tensor global_grad_norm,
bool use_nvlamb)
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 0, "lamb_stage_2",
DISPATCH_FLOAT_HALF_AND_BYTE(tensor_lists[2][0].scalar_type(), 1, "lamb_stage_2",
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 2, "lamb_stage_2",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
DistOptLAMBStage2Functor<scalar_t_0, scalar_t_1, scalar_t_2>(),
per_tensor_param_norm.DATA_PTR<scalar_t_2>(),
per_tensor_update_norm.DATA_PTR<scalar_t_2>(),
update_norm_offset.DATA_PTR<long>(),
learning_rate.DATA_PTR<scalar_t_2>(),
per_tensor_decay.DATA_PTR<scalar_t_2>(),
global_grad_norm.DATA_PTR<scalar_t_2>(),
use_nvlamb); )))
AT_CUDA_CHECK(cudaGetLastError());
}
|
fa5487e25e6b709c35007f0579228a2aeae1b23b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define IDX2D(a, i, stride, j) ((a)[(i)*(stride) + (j)])
__global__ void grayscale_kernel(double *z, unsigned char *output, size_t size, double z_min, double z_max) {
const double grid_size = blockDim.x*gridDim.x;
const int idx = threadIdx.x + blockDim.x*blockIdx.x;
for (int i = idx; i < size; i += grid_size)
output[i] = (char) round((z[i]-z_min)/(z_max-z_min)*255);
} | fa5487e25e6b709c35007f0579228a2aeae1b23b.cu | #include "includes.h"
#define IDX2D(a, i, stride, j) ((a)[(i)*(stride) + (j)])
__global__ void grayscale_kernel(double *z, unsigned char *output, size_t size, double z_min, double z_max) {
const double grid_size = blockDim.x*gridDim.x;
const int idx = threadIdx.x + blockDim.x*blockIdx.x;
for (int i = idx; i < size; i += grid_size)
output[i] = (char) round((z[i]-z_min)/(z_max-z_min)*255);
} |
42ce981990af4282575bd0f75b807d0bb12dbc6a.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file geo_app.cu
*
* @brief Geolocation Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/graphio/labels.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/app/geo/geo_enactor.cuh>
#include <gunrock/app/geo/geo_test.cuh>
namespace gunrock {
namespace app {
namespace geo {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(parameters.Use<int>(
"geo-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
3, "Number of iterations geolocation should run for (default=3).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"spatial-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
1000,
"Number of maximum iterations spatial median "
"kernel should run for (default=1000).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"geo-complete",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
false,
"Run geolocation application until all locations for all nodes are "
"found, uses an atomic (default=false).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<std::string>(
"labels-file",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
"", "User locations label file for geolocation app.", __FILE__,
__LINE__));
GUARD_CU(parameters.Use<bool>(
"debug",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
false,
"Debug label values, this prints out the entire labels array (longitude, "
"latitude).",
__FILE__, __LINE__));
return retval;
}
/**
* @brief Run geolocation tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return hipError_t error message(s), if any
*/
template <typename GraphT, typename ArrayT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
ArrayT &h_latitude, ArrayT &h_longitude,
ArrayT &ref_predicted_lat, ArrayT &ref_predicted_lon,
util::Location target) {
hipError_t retval = hipSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
int geo_iter = parameters.Get<int>("geo-iter");
int spatial_iter = parameters.Get<int>("spatial-iter");
util::PrintMsg("Number of iterations: " + std::to_string(geo_iter),
!quiet_mode);
util::Info info("geolocation", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// Allocate problem specific host data array to
// extract device values to host
ValueT *h_predicted_lat = new ValueT[graph.nodes];
ValueT *h_predicted_lon = new ValueT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
util::PrintMsg("Initializing problem ... ", !quiet_mode);
GUARD_CU(problem.Init(graph, target));
util::PrintMsg("Initializing enactor ... ", !quiet_mode);
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(h_latitude.GetPointer(util::HOST),
h_longitude.GetPointer(util::HOST), geo_iter,
spatial_iter, target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_predicted_lat, h_predicted_lon));
SizeT num_errors =
Validate_Results(parameters, graph, h_predicted_lat, h_predicted_lon,
ref_predicted_lat, ref_predicted_lon, false);
}
}
cpu_timer.Start();
// Extract problem data
GUARD_CU(problem.Extract(h_predicted_lat, h_predicted_lon));
if (validation == "last") {
SizeT num_errors =
Validate_Results(parameters, graph, h_predicted_lat, h_predicted_lon,
ref_predicted_lat, ref_predicted_lon, false);
}
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
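// Editor's note (hedged summary of RunTests above, not part of the original source): each timed
// run follows the usual Gunrock pattern -- problem.Reset(latitude, longitude, geo-iter,
// spatial-iter) -> enactor.Reset() -> enactor.Enact() -> problem.Extract(predicted lat/lon) ->
// Validate_Results -- with per-run timings collected through util::CpuTimer and util::Info.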
} // namespace geo
} // namespace app
} // namespace gunrock
// ===========================================================================================
// ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS
// =======================
// ===========================================================================================
// /*
// * @brief Entry of gunrock_template function
// * @tparam GraphT Type of the graph
// * @tparam ValueT Type of the distances
// * @param[in] parameters Excution parameters
// * @param[in] graph Input graph
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// */
// template <typename GraphT, typename ValueT = typename GraphT::ValueT>
// double gunrock_Template(
// gunrock::util::Parameters ¶meters,
// GraphT &graph
// // TODO: add problem specific outputs, e.g.:
// //ValueT **distances
// )
// {
// typedef typename GraphT::VertexT VertexT;
// typedef gunrock::app::Template::Problem<GraphT > ProblemT;
// typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
// gunrock::util::CpuTimer cpu_timer;
// gunrock::util::Location target = gunrock::util::DEVICE;
// double total_time = 0;
// if (parameters.UseDefault("quiet"))
// parameters.Set("quiet", true);
// // Allocate problem and enactor on GPU, and initialize them
// ProblemT problem(parameters);
// EnactorT enactor;
// problem.Init(graph , target);
// enactor.Init(problem, target);
// int num_runs = parameters.Get<int>("num-runs");
// // TODO: get problem specific inputs, e.g.:
// // std::vector<VertexT> srcs =
// parameters.Get<std::vector<VertexT>>("srcs");
// // int num_srcs = srcs.size();
// for (int run_num = 0; run_num < num_runs; ++run_num)
// {
// // TODO: problem specific inputs, e.g.:
// // int src_num = run_num % num_srcs;
// // VertexT src = srcs[src_num];
// problem.Reset(/*src,*/ target);
// enactor.Reset(/*src,*/ target);
// cpu_timer.Start();
// enactor.Enact(/*src*/);
// cpu_timer.Stop();
// total_time += cpu_timer.ElapsedMillis();
// // TODO: extract problem specific data, e.g.:
// problem.Extract(/*distances[src_num]*/);
// }
// enactor.Release(target);
// problem.Release(target);
// // TODO: problem specific clean ups, e.g.:
// // srcs.clear();
// return total_time;
// }
// * @brief Simple interface take in graph as CSR format
// * @param[in] num_nodes Number of veritces in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform SSSP
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// template <
// typename VertexT = int,
// typename SizeT = int,
// typename GValueT = unsigned int,
// typename TValueT = GValueT>
// float Geolocation(
// const SizeT num_nodes,
// const SizeT num_edges,
// const SizeT *row_offsets,
// const VertexT *col_indices,
// const GValueT *edge_values,
// const int num_runs
// // TODO: add problem specific inputs and outputs, e.g.:
// // VertexT *sources,
// // SSSPValueT **distances
// )
// {
// // TODO: change to other graph representation, if not using CSR
// typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
// gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
// GraphT;
// typedef typename GraphT::CsrT CsrT;
// // Setup parameters
// gunrock::util::Parameters parameters("Template");
// gunrock::graphio::UseParameters(parameters);
// gunrock::app::Template::UseParameters(parameters);
// gunrock::app::UseParameters_test(parameters);
// parameters.Parse_CommandLine(0, NULL);
// parameters.Set("graph-type", "by-pass");
// parameters.Set("num-runs", num_runs);
// // TODO: problem specific inputs, e.g.:
// // std::vector<VertexT> srcs;
// // for (int i = 0; i < num_runs; i ++)
// // srcs.push_back(sources[i]);
// // parameters.Set("srcs", srcs);
// bool quiet = parameters.Get<bool>("quiet");
// GraphT graph;
// // Assign pointers into gunrock graph format
// // TODO: change to other graph representation, if not using CSR
// graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
// graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1,
// gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices,
// num_edges, gunrock::util::HOST); graph.FromCsr(graph.csr(), true, quiet);
// gunrock::graphio::LoadGraph(parameters, graph);
// // Run the Template
// // TODO: add problem specific outputs, e.g.
// double elapsed_time = gunrock_Template(parameters, graph /*,
// distances*/);
// // Cleanup
// graph.Release();
// // TODO: problem specific cleanup
// // srcs.clear();
// return elapsed_time;
// }
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// // End:
| 42ce981990af4282575bd0f75b807d0bb12dbc6a.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file geo_app.cu
*
* @brief Geolocation Application
*/
#include <gunrock/gunrock.h>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/graphio/labels.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/app/geo/geo_enactor.cuh>
#include <gunrock/app/geo/geo_test.cuh>
namespace gunrock {
namespace app {
namespace geo {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(parameters.Use<int>(
"geo-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
3, "Number of iterations geolocation should run for (default=3).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"spatial-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
1000,
"Number of maximum iterations spatial median "
"kernel should run for (default=1000).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"geo-complete",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
false,
"Run geolocation application until all locations for all nodes are "
"found, uses an atomic (default=false).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<std::string>(
"labels-file",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
"", "User locations label file for geolocation app.", __FILE__,
__LINE__));
GUARD_CU(parameters.Use<bool>(
"debug",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
false,
"Debug label values, this prints out the entire labels array (longitude, "
"latitude).",
__FILE__, __LINE__));
return retval;
}
/**
* @brief Run geolocation tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Excution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
*/
template <typename GraphT, typename ArrayT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
ArrayT &h_latitude, ArrayT &h_longitude,
ArrayT &ref_predicted_lat, ArrayT &ref_predicted_lon,
util::Location target) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
int geo_iter = parameters.Get<int>("geo-iter");
int spatial_iter = parameters.Get<int>("spatial-iter");
util::PrintMsg("Number of iterations: " + std::to_string(geo_iter),
!quiet_mode);
util::Info info("geolocation", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// Allocate problem specific host data array to
// extract device values to host
ValueT *h_predicted_lat = new ValueT[graph.nodes];
ValueT *h_predicted_lon = new ValueT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
util::PrintMsg("Initializing problem ... ", !quiet_mode);
GUARD_CU(problem.Init(graph, target));
util::PrintMsg("Initializing enactor ... ", !quiet_mode);
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(h_latitude.GetPointer(util::HOST),
h_longitude.GetPointer(util::HOST), geo_iter,
spatial_iter, target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_predicted_lat, h_predicted_lon));
SizeT num_errors =
Validate_Results(parameters, graph, h_predicted_lat, h_predicted_lon,
ref_predicted_lat, ref_predicted_lon, false);
}
}
cpu_timer.Start();
// Extract problem data
GUARD_CU(problem.Extract(h_predicted_lat, h_predicted_lon));
if (validation == "last") {
SizeT num_errors =
Validate_Results(parameters, graph, h_predicted_lat, h_predicted_lon,
ref_predicted_lat, ref_predicted_lon, false);
}
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace geo
} // namespace app
} // namespace gunrock
// ===========================================================================================
// ========================= CODE BELOW THIS LINE NOT NEEDED FOR TESTS
// =======================
// ===========================================================================================
// /*
// * @brief Entry of gunrock_template function
// * @tparam GraphT Type of the graph
// * @tparam ValueT Type of the distances
// * @param[in] parameters Excution parameters
// * @param[in] graph Input graph
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// */
// template <typename GraphT, typename ValueT = typename GraphT::ValueT>
// double gunrock_Template(
// gunrock::util::Parameters ¶meters,
// GraphT &graph
// // TODO: add problem specific outputs, e.g.:
// //ValueT **distances
// )
// {
// typedef typename GraphT::VertexT VertexT;
// typedef gunrock::app::Template::Problem<GraphT > ProblemT;
// typedef gunrock::app::Template::Enactor<ProblemT> EnactorT;
// gunrock::util::CpuTimer cpu_timer;
// gunrock::util::Location target = gunrock::util::DEVICE;
// double total_time = 0;
// if (parameters.UseDefault("quiet"))
// parameters.Set("quiet", true);
// // Allocate problem and enactor on GPU, and initialize them
// ProblemT problem(parameters);
// EnactorT enactor;
// problem.Init(graph , target);
// enactor.Init(problem, target);
// int num_runs = parameters.Get<int>("num-runs");
// // TODO: get problem specific inputs, e.g.:
// // std::vector<VertexT> srcs =
// parameters.Get<std::vector<VertexT>>("srcs");
// // int num_srcs = srcs.size();
// for (int run_num = 0; run_num < num_runs; ++run_num)
// {
// // TODO: problem specific inputs, e.g.:
// // int src_num = run_num % num_srcs;
// // VertexT src = srcs[src_num];
// problem.Reset(/*src,*/ target);
// enactor.Reset(/*src,*/ target);
// cpu_timer.Start();
// enactor.Enact(/*src*/);
// cpu_timer.Stop();
// total_time += cpu_timer.ElapsedMillis();
// // TODO: extract problem specific data, e.g.:
// problem.Extract(/*distances[src_num]*/);
// }
// enactor.Release(target);
// problem.Release(target);
// // TODO: problem specific clean ups, e.g.:
// // srcs.clear();
// return total_time;
// }
// * @brief Simple interface take in graph as CSR format
// * @param[in] num_nodes Number of veritces in the input graph
// * @param[in] num_edges Number of edges in the input graph
// * @param[in] row_offsets CSR-formatted graph input row offsets
// * @param[in] col_indices CSR-formatted graph input column indices
// * @param[in] edge_values CSR-formatted graph input edge weights
// * @param[in] num_runs Number of runs to perform SSSP
// * @param[in] sources Sources to begin traverse, one for each run
// * @param[in] mark_preds Whether to output predecessor info
// * @param[out] distances Return shortest distance to source per vertex
// * @param[out] preds Return predecessors of each vertex
// * \return double Return accumulated elapsed times for all runs
// template <
// typename VertexT = int,
// typename SizeT = int,
// typename GValueT = unsigned int,
// typename TValueT = GValueT>
// float Geolocation(
// const SizeT num_nodes,
// const SizeT num_edges,
// const SizeT *row_offsets,
// const VertexT *col_indices,
// const GValueT *edge_values,
// const int num_runs
// // TODO: add problem specific inputs and outputs, e.g.:
// // VertexT *sources,
// // SSSPValueT **distances
// )
// {
// // TODO: change to other graph representation, if not using CSR
// typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
// gunrock::graph::HAS_EDGE_VALUES | gunrock::graph::HAS_CSR>
// GraphT;
// typedef typename GraphT::CsrT CsrT;
// // Setup parameters
// gunrock::util::Parameters parameters("Template");
// gunrock::graphio::UseParameters(parameters);
// gunrock::app::Template::UseParameters(parameters);
// gunrock::app::UseParameters_test(parameters);
// parameters.Parse_CommandLine(0, NULL);
// parameters.Set("graph-type", "by-pass");
// parameters.Set("num-runs", num_runs);
// // TODO: problem specific inputs, e.g.:
// // std::vector<VertexT> srcs;
// // for (int i = 0; i < num_runs; i ++)
// // srcs.push_back(sources[i]);
// // parameters.Set("srcs", srcs);
// bool quiet = parameters.Get<bool>("quiet");
// GraphT graph;
// // Assign pointers into gunrock graph format
// // TODO: change to other graph representation, if not using CSR
// graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
// graph.CsrT::row_offsets .SetPointer(row_offsets, num_nodes + 1,
// gunrock::util::HOST); graph.CsrT::column_indices.SetPointer(col_indices,
// num_edges, gunrock::util::HOST); graph.FromCsr(graph.csr(), true, quiet);
// gunrock::graphio::LoadGraph(parameters, graph);
// // Run the Template
// // TODO: add problem specific outputs, e.g.
// double elapsed_time = gunrock_Template(parameters, graph /*,
// distances*/);
// // Cleanup
// graph.Release();
// // TODO: problem specific cleanup
// // srcs.clear();
// return elapsed_time;
// }
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// // End:
|
ca2190a516a502eb32ae96cf9eb3c33835bbe90d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Mandelbrot explorer, based on my old Julia demo plus parts of Nicolas Melot's Lab 1 code.
// CPU only! Your task: Rewrite for CUDA! Test and evaluate performance.
// Compile with:
// gcc interactiveMandelbrot.cpp -shared-libgcc -lstdc++-static -o interactiveMandelbrot -lglut -lGL
// or
// g++ interactiveMandelbrot.cpp -o interactiveMandelbrot -lglut -lGL
// Your CUDA version should compile with something like
// nvcc -lglut -lGL interactiveMandelbrotCUDA.cu -o interactiveMandelbrotCUDA
// Preliminary version 2014-11-30
// Cleaned a bit more 2014-12-01
// Corrected the missing glRasterPos2i 2014-12-03
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Image data
unsigned char *pixels = NULL;
unsigned char *gpu_pixels;
int gpu_size = 0;
int gImageWidth, gImageHeight;
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = (unsigned char *)malloc(width * height * 4);
gImageWidth = width;
gImageHeight = height;
// Allocation of the matrix on the GPU
if (gpu_pixels) hipFree(gpu_pixels);
gpu_size = sizeof(unsigned char) * width * height * 4;
hipMalloc((void**)&gpu_pixels, gpu_size);
}
#define DIM 512
// Select precision here! float or double!
#define MYFLOAT double
// User controlled parameters
int maxiter = 20;
MYFLOAT offsetx = -200, offsety = 0, zoom = 0;
MYFLOAT scale = 1.5;
// Complex number class
struct hipComplex
{
MYFLOAT r;
MYFLOAT i;
__device__ hipComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__device__
float magnitude2( void )
{
return r * r + i * i;
}
__device__
hipComplex operator*(const hipComplex& a)
{
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__
hipComplex operator+(const hipComplex& a)
{
return hipComplex(r+a.r, i+a.i);
}
};
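// Editor's note (hedged, not part of the original source): operator* above is ordinary complex
// multiplication, (r + i*I)*(a.r + a.i*I) = (r*a.r - i*a.i) + (i*a.r + r*a.i)*I, and
// magnitude2() returns |z|^2, so the escape test in the kernel below compares |z|^2 > 1000
// without ever taking a square root.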
__global__
void mandelbrot_kernel(unsigned char *gpu_pixels, int maxiter, int gImageWidth, int gImageHeight, MYFLOAT offsetx, MYFLOAT offsety, MYFLOAT scale)
{
int x = threadIdx.x;
int y = blockIdx.x;
//printf("%d %d \n",x,y);
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
hipComplex c(jx, jy);
hipComplex a(jx, jy);
int i = 0;
int stop = 0;
for (i=0; i<maxiter && !stop; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
stop = 1;
}
int fractalValue = i;
// printf("%d\n",i);
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
int offset = x + y * gImageWidth;
gpu_pixels[offset*4 + 0] = red;
gpu_pixels[offset*4 + 1] = green;
gpu_pixels[offset*4 + 2] = blue;
gpu_pixels[offset*4 + 3] = 255;
}
int mandelbrot( int x, int y)
{/*
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
hipComplex c(jx, jy);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;*/
return 0;
}
void computeFractal( unsigned char *ptr)
{
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float duration = 0;
dim3 dimBlock(DIM, 1);
dim3 dimGrid(DIM,1 );
hipEventRecord(start, 0);
hipLaunchKernelGGL(( mandelbrot_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_pixels, maxiter, gImageWidth, gImageHeight, offsetx, offsety, scale);
// map from x, y to pixel position
/* for (int x = 0; x < gImageWidth; x++)
for (int y = 0; y < gImageHeight; y++)
{
int offset = x + y * gImageWidth;
// now calculate the value at that position
int fractalValue = mandelbrot( x, y);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
*/
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&duration, start, stop);
printf("%f\n",(double)duration/1000);
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy( pixels, gpu_pixels, gpu_size, hipMemcpyDeviceToHost );
}
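// Editor's note (hedged, not part of the original source): the event pair in computeFractal
// above brackets only the kernel launch; the elapsed time is reported in milliseconds, so the
// printf divides by 1000 to log seconds per frame, and the device-to-host copy of the pixel
// buffer happens outside the timed region.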
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void *font, const char *string)
{
int i;
for (i = 0; string[i]; i++)
glutBitmapCharacter(font, string[i]);
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
computeFractal(pixels);
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
if (print_help)
PrintHelp();
glutSwapBuffers();
}
char explore = 1;
static void Reshape(int width, int height)
{
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
offsetx += (x - mouse_x)*scale;
mouse_x = x;
offsety += (mouse_y - y)*scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
maxiter += maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
maxiter -= maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("Mandelbrot explorer (CPU)");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
}
| ca2190a516a502eb32ae96cf9eb3c33835bbe90d.cu | // Mandelbrot explorer, based on my old Julia demo plus parts of Nicolas Melot's Lab 1 code.
// CPU only! Your task: Rewrite for CUDA! Test and evaluate performance.
// Compile with:
// gcc interactiveMandelbrot.cpp -shared-libgcc -lstdc++-static -o interactiveMandelbrot -lglut -lGL
// or
// g++ interactiveMandelbrot.cpp -o interactiveMandelbrot -lglut -lGL
// Your CUDA version should compile with something like
// nvcc -lglut -lGL interactiveMandelbrotCUDA.cu -o interactiveMandelbrotCUDA
// Preliminary version 2014-11-30
// Cleaned a bit more 2014-12-01
// Corrected the missing glRasterPos2i 2014-12-03
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Image data
unsigned char *pixels = NULL;
unsigned char *gpu_pixels;
int gpu_size = 0;
int gImageWidth, gImageHeight;
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = (unsigned char *)malloc(width * height * 4);
gImageWidth = width;
gImageHeight = height;
// Allocation of the matrix on the GPU
if (gpu_pixels) cudaFree(gpu_pixels);
gpu_size = sizeof(unsigned char) * width * height * 4;
cudaMalloc((void**)&gpu_pixels, gpu_size);
}
#define DIM 512
// Select precision here! float or double!
#define MYFLOAT double
// User controlled parameters
int maxiter = 20;
MYFLOAT offsetx = -200, offsety = 0, zoom = 0;
MYFLOAT scale = 1.5;
// Complex number class
struct cuComplex
{
MYFLOAT r;
MYFLOAT i;
__device__ cuComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__device__
float magnitude2( void )
{
return r * r + i * i;
}
__device__
cuComplex operator*(const cuComplex& a)
{
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__
cuComplex operator+(const cuComplex& a)
{
return cuComplex(r+a.r, i+a.i);
}
};
__global__
void mandelbrot_kernel(unsigned char *gpu_pixels, int maxiter, int gImageWidth, int gImageHeight, MYFLOAT offsetx, MYFLOAT offsety, MYFLOAT scale)
{
int x = threadIdx.x;
int y = blockIdx.x;
//printf("%d %d \n",x,y);
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
cuComplex c(jx, jy);
cuComplex a(jx, jy);
int i = 0;
int stop = 0;
for (i=0; i<maxiter && !stop; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
stop = 1;
}
int fractalValue = i;
// printf("%d\n",i);
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
int offset = x + y * gImageWidth;
gpu_pixels[offset*4 + 0] = red;
gpu_pixels[offset*4 + 1] = green;
gpu_pixels[offset*4 + 2] = blue;
gpu_pixels[offset*4 + 3] = 255;
}
int mandelbrot( int x, int y)
{/*
MYFLOAT jx = scale * (MYFLOAT)(gImageWidth/2 - x + offsetx/scale)/(gImageWidth/2);
MYFLOAT jy = scale * (MYFLOAT)(gImageHeight/2 - y + offsety/scale)/(gImageWidth/2);
cuComplex c(jx, jy);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000)
return i;
}
return i;*/
return 0;
}
void computeFractal( unsigned char *ptr)
{
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float duration = 0;
dim3 dimBlock(DIM, 1);
dim3 dimGrid(DIM,1 );
cudaEventRecord(start, 0);
mandelbrot_kernel<<<dimGrid, dimBlock>>>(gpu_pixels, maxiter, gImageWidth, gImageHeight, offsetx, offsety, scale);
// map from x, y to pixel position
/* for (int x = 0; x < gImageWidth; x++)
for (int y = 0; y < gImageHeight; y++)
{
int offset = x + y * gImageWidth;
// now calculate the value at that position
int fractalValue = mandelbrot( x, y);
// Colorize it
int red = 255 * fractalValue/maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/maxiter;
if (blue > 255) blue = 255 - blue;
ptr[offset*4 + 0] = red;
ptr[offset*4 + 1] = green;
ptr[offset*4 + 2] = blue;
ptr[offset*4 + 3] = 255;
}
*/
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&duration, start, stop);
printf("%f\n",(double)duration/1000);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy( pixels, gpu_pixels, gpu_size, cudaMemcpyDeviceToHost );
}
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void *font, const char *string)
{
int i;
for (i = 0; string[i]; i++)
glutBitmapCharacter(font, string[i]);
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
computeFractal(pixels);
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
if (print_help)
PrintHelp();
glutSwapBuffers();
}
char explore = 1;
static void Reshape(int width, int height)
{
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
offsetx += (x - mouse_x)*scale;
mouse_x = x;
offsety += (mouse_y - y)*scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
maxiter += maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
maxiter -= maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("Mandelbrot explorer (CUDA)");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
}
|
76cb56043690287341c02839c7970a652f76b699.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void binarize_weights_mean_kernel(float *weights, int n, int size, float *binary, float *mean_arr_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int f = i / size;
if (f >= n) return;
float mean = mean_arr_gpu[f];
binary[i] = (weights[i] > 0) ? mean : -mean;
} | 76cb56043690287341c02839c7970a652f76b699.cu | #include "includes.h"
__global__ void binarize_weights_mean_kernel(float *weights, int n, int size, float *binary, float *mean_arr_gpu)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int f = i / size;
if (f >= n) return;
float mean = mean_arr_gpu[f];
binary[i] = (weights[i] > 0) ? mean : -mean;
} |
f7917f22b3dfd95fa01aaf9981dee5cb3292887f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
__global__
void simple(float *c)
{
c[threadIdx.x] = threadIdx.x;
}
__global__
void square(float *c)
{
c[threadIdx.x] = sqrt(float (threadIdx.x));
}
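// Despite its name, square() computes the square root of the thread index and is not
// launched from main() below. A hypothetical launch (not in the original) would be:
// hipLaunchKernelGGL(square, dimGrid, dimBlock, 0, 0, cd);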
int main()
{
float *c = new float[N];
float *cd;
const int size = N*sizeof(float);
hipMalloc( (void**)&cd, size );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
hipLaunchKernelGGL(( simple), dim3(dimGrid), dim3(dimBlock), 0, 0, cd);
hipDeviceSynchronize();
hipMemcpy( c, cd, size, hipMemcpyDeviceToHost );
hipFree( cd );
for (int i = 0; i < N; i++)
printf("%f ", c[i]);
printf("\n");
delete[] c;
printf("done\n");
return EXIT_SUCCESS;
}
| f7917f22b3dfd95fa01aaf9981dee5cb3292887f.cu | // Simple CUDA example by Ingemar Ragnemalm 2009. Simplest possible?
// Assigns every element in an array with its index.
// nvcc simple.cu -L /usr/local/cuda/lib -lcudart -o simple
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
__global__
void simple(float *c)
{
c[threadIdx.x] = threadIdx.x;
}
__global__
void square(float *c)
{
c[threadIdx.x] = sqrt(float (threadIdx.x));
}
int main()
{
float *c = new float[N];
float *cd;
const int size = N*sizeof(float);
cudaMalloc( (void**)&cd, size );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
simple<<<dimGrid, dimBlock>>>(cd);
cudaThreadSynchronize();
cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
cudaFree( cd );
for (int i = 0; i < N; i++)
printf("%f ", c[i]);
printf("\n");
delete[] c;
printf("done\n");
return EXIT_SUCCESS;
}
|
25f8f1fb95db47ecd08e9d2670b8ea3d14c772c5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Open source copyright declaration based on BSD open source template:
* http://www.opensource.org/licenses/bsd-license.php
*
* This file is part of the scalar-tridiagonal solver distribution.
*
* Copyright (c) 2015, Endre László and others. Please see the AUTHORS file in
* the main source directory for a full list of copyright holders.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The name of Endre László may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Endre László ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Endre László BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <float.h>
#include <sys/time.h>
#include "tridsolver.h"
#include "trid_mpi_solver_params.hpp"
#include "trid_common.h"
#include "cutil_inline.h"
#include "omp.h"
//#include "offload.h"
#include "mpi.h"
#ifdef __MKL__
//#include "lapacke.h"
#include "mkl_lapacke.h"
//#include "mkl.h"
#endif
#include "adi_mpi.h"
#include "preproc_mpi_cuda.hpp"
#define ROUND_DOWN(N,step) (((N)/(step))*step)
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
extern char *optarg;
extern int optind, opterr, optopt;
static struct option options[] = {
{"devid", required_argument, 0, 0 },
{"nx", required_argument, 0, 0 },
{"ny", required_argument, 0, 0 },
{"nz", required_argument, 0, 0 },
{"iter", required_argument, 0, 0 },
{"opt", required_argument, 0, 0 },
{"prof", required_argument, 0, 0 },
{"help", no_argument, 0, 'h' },
{"b", required_argument, 0, 0 },
{"m", required_argument, 0, 0 },
{0, 0, 0, 0 }
};
// Function for calculating local problem size for a MPI process, as well as its
// global start and end indices.
void setStartEnd(int *start, int *end, int coord, int numProcs, int numElements) {
int tmp = numElements / numProcs;
int remainder = numElements % numProcs;
int total = 0;
for(int i = 0; i < coord; i++) {
if(i < remainder) {
total += tmp + 1;
} else {
total += tmp;
}
}
*start = total;
if(coord < remainder) {
*end = *start + tmp;
} else {
*end = *start + tmp - 1;
}
}
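// Worked example (illustrative, not from the original source): with numElements = 10 and
// numProcs = 3, remainder = 1, so coord 0 covers indices [0,3] (4 elements) while coords 1
// and 2 cover [4,6] and [7,9] (3 elements each).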
/*
* Print essential information on the use of the program
*/
void print_help() {
printf("Please specify the ADI configuration, e.g.: \n$ ./adi_* -nx NX -ny NY -nz NZ -iter ITER [-opt CUDAOPT] -prof PROF\n");
exit(0);
}
// Timing functions
inline double elapsed_time(double *et) {
struct timeval t;
double old_time = *et;
gettimeofday( &t, (struct timezone *)0 );
*et = t.tv_sec + t.tv_usec*1.0e-6;
return *et - old_time;
}
inline void timing_start(double *timer) {
elapsed_time(timer);
}
inline void timing_end(double *timer, double *elapsed_accumulate) {
double elapsed = elapsed_time(timer);
*elapsed_accumulate += elapsed;
}
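// Typical usage (as in main() below): call timing_start(&timer) before a section and
// timing_end(&timer, &elapsed_trid_x) after it to accumulate that section's elapsed seconds.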
// Function to add up a distributed array and print the result
void rms(const char* name, FP* array, app_handle &handle) {
// Sum the values in the local portion of the array
double sum = 0.0;
for(int k = 0; k < handle.size[2]; k++) {
for(int j = 0; j < handle.size[1]; j++) {
for(int i = 0; i < handle.size[0]; i++) {
int ind = k * handle.size[0] * handle.size[1] + j * handle.size[0] + i;
sum += array[ind];
}
}
}
double global_sum = 0.0;
MPI_Allreduce(&sum, &global_sum,1, MPI_DOUBLE,MPI_SUM, handle.comm);
if(handle.coords[0] == 0 && handle.coords[1] == 0 && handle.coords[2] == 0) {
printf("%s sum = %.15g\n", name, global_sum);
}
}
// Initialize the ADI application
int init(app_handle &app, preproc_handle<FP> &pre_handle, int &iter, int argc, char* argv[]) {
if( MPI_Init(&argc,&argv) != MPI_SUCCESS) { printf("MPI Couldn't initialize. Exiting"); exit(-1);}
int devid = 0;
int nx_g = 256;
int ny_g = 256;
int nz_g = 256;
iter = 10;
int opt = 0;
int prof = 1;
int batchSize = 16384;
int m = 1;
pre_handle.lambda = 1.0f;
// Process arguments
int opt_index = 0;
while( getopt_long_only(argc, argv, "", options, &opt_index) != -1) {
if(strcmp((char*)options[opt_index].name,"devid") == 0) devid = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"nx" ) == 0) nx_g = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"ny" ) == 0) ny_g = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"nz" ) == 0) nz_g = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"iter") == 0) iter = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"opt" ) == 0) opt = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"prof") == 0) prof = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"help") == 0) print_help();
if(strcmp((char*)options[opt_index].name,"b" ) == 0) batchSize = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"m" ) == 0) m = atoi(optarg);
}
// Allocate memory to store problem characteristics
app.size_g = (int *) calloc(3, sizeof(int));
app.size = (int *) calloc(3, sizeof(int));
app.start_g = (int *) calloc(3, sizeof(int));
app.end_g = (int *) calloc(3, sizeof(int));
app.size_g[0] = nx_g;
app.size_g[1] = ny_g;
app.size_g[2] = nz_g;
// Set up MPI for tridiagonal solver
int procs, rank;
MPI_Comm_size(MPI_COMM_WORLD, &procs);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// Create 3D Cartesian MPI topology
app.pdims = (int *) calloc(3, sizeof(int));
int *periodic = (int *) calloc(3, sizeof(int)); //false
app.coords = (int *) calloc(3, sizeof(int));
MPI_Dims_create(procs, 3, app.pdims);
// Set up which GPU this MPI process is using
// Currently set for 4 GPUs per node, with 1 MPI process per GPU
//devid = rank % 4;
cudaSafeCall( hipSetDevice(devid) );
cutilDeviceInit(argc, argv);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
// Create 3D Cartesian MPI communicator
MPI_Cart_create(MPI_COMM_WORLD, 3, app.pdims, periodic, 0, &app.comm);
int my_cart_rank;
MPI_Comm_rank(app.comm, &my_cart_rank);
MPI_Cart_coords(app.comm, my_cart_rank, 3, app.coords);
// Create MPI handle used by tridiagonal solver
switch(m) {
case 1:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::ALLGATHER);
break;
case 2:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::LATENCY_HIDING_TWO_STEP);
break;
case 3:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::LATENCY_HIDING_INTERLEAVED);
break;
case 4:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::JACOBI);
break;
case 5:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::PCR);
break;
default:
exit(-1);
}
// Calculate local problem size for this MPI process
for(int i = 0; i < 3; i++) {
setStartEnd(&app.start_g[i], &app.end_g[i], app.coords[i], app.pdims[i], app.size_g[i]);
app.size[i] = app.end_g[i] - app.start_g[i] + 1;
}
free(periodic);
if(rank==0) {
printf("\nGlobal grid dimensions: %d x %d x %d\n",
app.size_g[0], app.size_g[1], app.size_g[2]);
printf("\nNumber of MPI procs in each dimension %d, %d, %d\n",
app.pdims[0], app.pdims[1], app.pdims[2]);
}
// Allocate memory for local section of problem
int size = app.size[0] * app.size[1] * app.size[2];
cudaSafeCall( hipMalloc((void **)&app.a, size * sizeof(FP)) );
cudaSafeCall( hipMalloc((void **)&app.b, size * sizeof(FP)) );
cudaSafeCall( hipMalloc((void **)&app.c, size * sizeof(FP)) );
cudaSafeCall( hipMalloc((void **)&app.d, size * sizeof(FP)) );
cudaSafeCall( hipMalloc((void **)&app.u, size * sizeof(FP)) );
FP *h_u = (FP *) malloc(sizeof(FP) * size);
// Initialize
for(int k = 0; k < app.size[2]; k++) {
for(int j = 0; j < app.size[1]; j++) {
for(int i = 0; i < app.size[0]; i++) {
int ind = k * app.size[0] * app.size[1] + j*app.size[0] + i;
if( (app.start_g[0]==0 && i==0) ||
(app.end_g[0]==app.size_g[0]-1 && i==app.size[0]-1) ||
(app.start_g[1]==0 && j==0) ||
(app.end_g[1]==app.size_g[1]-1 && j==app.size[1]-1) ||
(app.start_g[2]==0 && k==0) ||
(app.end_g[2]==app.size_g[2]-1 && k==app.size[2]-1)) {
h_u[ind] = 1.0f;
} else {
h_u[ind] = 0.0f;
}
}
}
}
// Copy initial values to GPU memory
cudaSafeCall( hipMemcpy(app.u, h_u, sizeof(FP) * size, hipMemcpyHostToDevice) );
free(h_u);
// Allocate memory used in each iteration's preprocessing
pre_handle.rcv_size_x = 2 * app.size[1] * app.size[2];
pre_handle.rcv_size_y = 2 * app.size[0] * app.size[2];
pre_handle.rcv_size_z = 2 * app.size[1] * app.size[0];
pre_handle.halo_snd_x = (FP*) malloc(pre_handle.rcv_size_x * sizeof(FP));
pre_handle.halo_rcv_x = (FP*) malloc(pre_handle.rcv_size_x * sizeof(FP));
pre_handle.halo_snd_y = (FP*) malloc(pre_handle.rcv_size_y * sizeof(FP));
pre_handle.halo_rcv_y = (FP*) malloc(pre_handle.rcv_size_y * sizeof(FP));
pre_handle.halo_snd_z = (FP*) malloc(pre_handle.rcv_size_z * sizeof(FP));
pre_handle.halo_rcv_z = (FP*) malloc(pre_handle.rcv_size_z * sizeof(FP));
cudaSafeCall( hipMalloc((void **)&pre_handle.rcv_x, pre_handle.rcv_size_x * sizeof(FP)) );
cudaSafeCall( hipMalloc((void **)&pre_handle.rcv_y, pre_handle.rcv_size_y * sizeof(FP)) );
cudaSafeCall( hipMalloc((void **)&pre_handle.rcv_z, pre_handle.rcv_size_z * sizeof(FP)) );
return 0;
}
// Free memory used
void finalize(app_handle &app, preproc_handle<FP> &pre_handle) {
free(pre_handle.halo_snd_x);
free(pre_handle.halo_rcv_x);
free(pre_handle.halo_snd_y);
free(pre_handle.halo_rcv_y);
free(pre_handle.halo_snd_z);
free(pre_handle.halo_rcv_z);
cudaSafeCall( hipFree(pre_handle.rcv_x) );
cudaSafeCall( hipFree(pre_handle.rcv_y) );
cudaSafeCall( hipFree(pre_handle.rcv_z) );
cudaSafeCall( hipFree(app.a) );
cudaSafeCall( hipFree(app.b) );
cudaSafeCall( hipFree(app.c) );
cudaSafeCall( hipFree(app.d) );
cudaSafeCall( hipFree(app.u) );
free(app.size_g);
free(app.size);
free(app.start_g);
free(app.end_g);
free(app.pdims);
free(app.coords);
delete app.params;
}
int main(int argc, char* argv[]) {
app_handle app;
preproc_handle<FP> pre_handle;
int iter;
// Initialize
init(app, pre_handle, iter, argc, argv);
// Declare and reset elapsed time counters
double timer = 0.0;
double timer1 = 0.0;
double elapsed_total = 0.0;
double elapsed_preproc = 0.0;
double elapsed_trid_x = 0.0;
double elapsed_trid_y = 0.0;
double elapsed_trid_z = 0.0;
timing_start(&timer1);
// Allocate memory used in sums of distributed arrays
FP *h_u = (FP *) malloc(sizeof(FP) * app.size[0] * app.size[1] * app.size[2]);
FP *du = (FP *) malloc(sizeof(FP) * app.size[0] * app.size[1] * app.size[2]);
TridParams trid_params;
trid_params.mpi_params = (void *)app.params;
// Iterate over specified number of time steps
for(int it = 0; it < iter; it++) {
// Preprocess
timing_start(&timer);
preproc_mpi_cuda<FP>(pre_handle, app);
timing_end(&timer, &elapsed_preproc);
cudaSafeCall( hipDeviceSynchronize() );
//
// perform tri-diagonal solves in x-direction
//
timing_start(&timer);
#if FPPREC == 0
tridSmtsvStridedBatch(&trid_params, app.a, app.b, app.c, app.d, 3, 0, app.size, app.size);
#else
tridDmtsvStridedBatch(&trid_params, app.a, app.b, app.c, app.d, 3, 0, app.size, app.size);
#endif
timing_end(&timer, &elapsed_trid_x);
//
// perform tri-diagonal solves in y-direction
//
timing_start(&timer);
#if FPPREC == 0
tridSmtsvStridedBatch(&trid_params, app.a, app.b, app.c, app.d, 3, 1, app.size, app.size);
#else
tridDmtsvStridedBatch(&trid_params, app.a, app.b, app.c, app.d, 3, 1, app.size, app.size);
#endif
timing_end(&timer, &elapsed_trid_y);
//
// perform tri-diagonal solves in z-direction
//
timing_start(&timer);
#if FPPREC == 0
tridSmtsvStridedBatchInc(&trid_params, app.a, app.b, app.c, app.d, app.u, 3, 2, app.size, app.size);
#else
tridDmtsvStridedBatchInc(&trid_params, app.a, app.b, app.c, app.d, app.u, 3, 2, app.size, app.size);
#endif
timing_end(&timer, &elapsed_trid_z);
}
timing_end(&timer1, &elapsed_total);
// Print sum of these arrays (basic error checking)
cudaSafeCall( hipMemcpy(h_u, app.u, sizeof(FP) * app.size[0] * app.size[1] * app.size[2], hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(du, app.d, sizeof(FP) * app.size[0] * app.size[1] * app.size[2], hipMemcpyDeviceToHost) );
rms("end h", h_u, app);
rms("end d", du, app);
MPI_Barrier(MPI_COMM_WORLD);
free(h_u);
free(du);
MPI_Barrier(MPI_COMM_WORLD);
// Print out timings of each section
if(app.coords[0] == 0 && app.coords[1] == 0 && app.coords[2] == 0) {
// Print execution times
printf("Time per section: \n[total] \t[prepro] \t[trid_x] \t[trid_y] \t[trid_z]\n");
printf("%e \t%e \t%e \t%e \t%e\n",
elapsed_total,
elapsed_preproc,
elapsed_trid_x,
elapsed_trid_y,
elapsed_trid_z);
}
MPI_Barrier(MPI_COMM_WORLD);
// Free memory
finalize(app, pre_handle);
MPI_Finalize();
hipDeviceReset();
return 0;
}
| 25f8f1fb95db47ecd08e9d2670b8ea3d14c772c5.cu | /*
* Open source copyright declaration based on BSD open source template:
* http://www.opensource.org/licenses/bsd-license.php
*
* This file is part of the scalar-tridiagonal solver distribution.
*
* Copyright (c) 2015, Endre László and others. Please see the AUTHORS file in
* the main source directory for a full list of copyright holders.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The name of Endre László may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Endre László ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Endre László BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <float.h>
#include <sys/time.h>
#include "tridsolver.h"
#include "trid_mpi_solver_params.hpp"
#include "trid_common.h"
#include "cutil_inline.h"
#include "omp.h"
//#include "offload.h"
#include "mpi.h"
#ifdef __MKL__
//#include "lapacke.h"
#include "mkl_lapacke.h"
//#include "mkl.h"
#endif
#include "adi_mpi.h"
#include "preproc_mpi_cuda.hpp"
#define ROUND_DOWN(N,step) (((N)/(step))*step)
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
extern char *optarg;
extern int optind, opterr, optopt;
static struct option options[] = {
{"devid", required_argument, 0, 0 },
{"nx", required_argument, 0, 0 },
{"ny", required_argument, 0, 0 },
{"nz", required_argument, 0, 0 },
{"iter", required_argument, 0, 0 },
{"opt", required_argument, 0, 0 },
{"prof", required_argument, 0, 0 },
{"help", no_argument, 0, 'h' },
{"b", required_argument, 0, 0 },
{"m", required_argument, 0, 0 },
{0, 0, 0, 0 }
};
// Function for calculating local problem size for a MPI process, as well as its
// global start and end indices.
void setStartEnd(int *start, int *end, int coord, int numProcs, int numElements) {
int tmp = numElements / numProcs;
int remainder = numElements % numProcs;
int total = 0;
for(int i = 0; i < coord; i++) {
if(i < remainder) {
total += tmp + 1;
} else {
total += tmp;
}
}
*start = total;
if(coord < remainder) {
*end = *start + tmp;
} else {
*end = *start + tmp - 1;
}
}
/*
* Print essential information on the use of the program
*/
void print_help() {
printf("Please specify the ADI configuration, e.g.: \n$ ./adi_* -nx NX -ny NY -nz NZ -iter ITER [-opt CUDAOPT] -prof PROF\n");
exit(0);
}
// Timing functions
inline double elapsed_time(double *et) {
struct timeval t;
double old_time = *et;
gettimeofday( &t, (struct timezone *)0 );
*et = t.tv_sec + t.tv_usec*1.0e-6;
return *et - old_time;
}
inline void timing_start(double *timer) {
elapsed_time(timer);
}
inline void timing_end(double *timer, double *elapsed_accumulate) {
double elapsed = elapsed_time(timer);
*elapsed_accumulate += elapsed;
}
// Function to add up a distributed array and print the result
void rms(const char* name, FP* array, app_handle &handle) {
// Sum the values in the local portion of the array
double sum = 0.0;
for(int k = 0; k < handle.size[2]; k++) {
for(int j = 0; j < handle.size[1]; j++) {
for(int i = 0; i < handle.size[0]; i++) {
int ind = k * handle.size[0] * handle.size[1] + j * handle.size[0] + i;
sum += array[ind];
}
}
}
double global_sum = 0.0;
MPI_Allreduce(&sum, &global_sum,1, MPI_DOUBLE,MPI_SUM, handle.comm);
if(handle.coords[0] == 0 && handle.coords[1] == 0 && handle.coords[2] == 0) {
printf("%s sum = %.15g\n", name, global_sum);
}
}
// Initialize the ADI application
int init(app_handle &app, preproc_handle<FP> &pre_handle, int &iter, int argc, char* argv[]) {
if( MPI_Init(&argc,&argv) != MPI_SUCCESS) { printf("MPI Couldn't initialize. Exiting"); exit(-1);}
int devid = 0;
int nx_g = 256;
int ny_g = 256;
int nz_g = 256;
iter = 10;
int opt = 0;
int prof = 1;
int batchSize = 16384;
int m = 1;
pre_handle.lambda = 1.0f;
// Process arguments
int opt_index = 0;
while( getopt_long_only(argc, argv, "", options, &opt_index) != -1) {
if(strcmp((char*)options[opt_index].name,"devid") == 0) devid = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"nx" ) == 0) nx_g = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"ny" ) == 0) ny_g = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"nz" ) == 0) nz_g = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"iter") == 0) iter = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"opt" ) == 0) opt = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"prof") == 0) prof = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"help") == 0) print_help();
if(strcmp((char*)options[opt_index].name,"b" ) == 0) batchSize = atoi(optarg);
if(strcmp((char*)options[opt_index].name,"m" ) == 0) m = atoi(optarg);
}
// Allocate memory to store problem characteristics
app.size_g = (int *) calloc(3, sizeof(int));
app.size = (int *) calloc(3, sizeof(int));
app.start_g = (int *) calloc(3, sizeof(int));
app.end_g = (int *) calloc(3, sizeof(int));
app.size_g[0] = nx_g;
app.size_g[1] = ny_g;
app.size_g[2] = nz_g;
// Set up MPI for tridiagonal solver
int procs, rank;
MPI_Comm_size(MPI_COMM_WORLD, &procs);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// Create 3D Cartesian MPI topology
app.pdims = (int *) calloc(3, sizeof(int));
int *periodic = (int *) calloc(3, sizeof(int)); //false
app.coords = (int *) calloc(3, sizeof(int));
MPI_Dims_create(procs, 3, app.pdims);
// Set up which GPU this MPI process is using
// Currently set for 4 GPUs per node, with 1 MPI process per GPU
//devid = rank % 4;
cudaSafeCall( cudaSetDevice(devid) );
cutilDeviceInit(argc, argv);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
// Create 3D Cartesian MPI communicator
MPI_Cart_create(MPI_COMM_WORLD, 3, app.pdims, periodic, 0, &app.comm);
int my_cart_rank;
MPI_Comm_rank(app.comm, &my_cart_rank);
MPI_Cart_coords(app.comm, my_cart_rank, 3, app.coords);
// Create MPI handle used by tridiagonal solver
switch(m) {
case 1:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::ALLGATHER);
break;
case 2:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::LATENCY_HIDING_TWO_STEP);
break;
case 3:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::LATENCY_HIDING_INTERLEAVED);
break;
case 4:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::JACOBI);
break;
case 5:
app.params = new MpiSolverParams(app.comm, 3, app.pdims, batchSize, MpiSolverParams::PCR);
break;
default:
exit(-1);
}
// Calculate local problem size for this MPI process
for(int i = 0; i < 3; i++) {
setStartEnd(&app.start_g[i], &app.end_g[i], app.coords[i], app.pdims[i], app.size_g[i]);
app.size[i] = app.end_g[i] - app.start_g[i] + 1;
}
free(periodic);
if(rank==0) {
printf("\nGlobal grid dimensions: %d x %d x %d\n",
app.size_g[0], app.size_g[1], app.size_g[2]);
printf("\nNumber of MPI procs in each dimension %d, %d, %d\n",
app.pdims[0], app.pdims[1], app.pdims[2]);
}
// Allocate memory for local section of problem
int size = app.size[0] * app.size[1] * app.size[2];
cudaSafeCall( cudaMalloc((void **)&app.a, size * sizeof(FP)) );
cudaSafeCall( cudaMalloc((void **)&app.b, size * sizeof(FP)) );
cudaSafeCall( cudaMalloc((void **)&app.c, size * sizeof(FP)) );
cudaSafeCall( cudaMalloc((void **)&app.d, size * sizeof(FP)) );
cudaSafeCall( cudaMalloc((void **)&app.u, size * sizeof(FP)) );
FP *h_u = (FP *) malloc(sizeof(FP) * size);
// Initialize
for(int k = 0; k < app.size[2]; k++) {
for(int j = 0; j < app.size[1]; j++) {
for(int i = 0; i < app.size[0]; i++) {
int ind = k * app.size[0] * app.size[1] + j*app.size[0] + i;
if( (app.start_g[0]==0 && i==0) ||
(app.end_g[0]==app.size_g[0]-1 && i==app.size[0]-1) ||
(app.start_g[1]==0 && j==0) ||
(app.end_g[1]==app.size_g[1]-1 && j==app.size[1]-1) ||
(app.start_g[2]==0 && k==0) ||
(app.end_g[2]==app.size_g[2]-1 && k==app.size[2]-1)) {
h_u[ind] = 1.0f;
} else {
h_u[ind] = 0.0f;
}
}
}
}
// Copy initial values to GPU memory
cudaSafeCall( cudaMemcpy(app.u, h_u, sizeof(FP) * size, cudaMemcpyHostToDevice) );
free(h_u);
// Allocate memory used in each iteration's preprocessing
pre_handle.rcv_size_x = 2 * app.size[1] * app.size[2];
pre_handle.rcv_size_y = 2 * app.size[0] * app.size[2];
pre_handle.rcv_size_z = 2 * app.size[1] * app.size[0];
pre_handle.halo_snd_x = (FP*) malloc(pre_handle.rcv_size_x * sizeof(FP));
pre_handle.halo_rcv_x = (FP*) malloc(pre_handle.rcv_size_x * sizeof(FP));
pre_handle.halo_snd_y = (FP*) malloc(pre_handle.rcv_size_y * sizeof(FP));
pre_handle.halo_rcv_y = (FP*) malloc(pre_handle.rcv_size_y * sizeof(FP));
pre_handle.halo_snd_z = (FP*) malloc(pre_handle.rcv_size_z * sizeof(FP));
pre_handle.halo_rcv_z = (FP*) malloc(pre_handle.rcv_size_z * sizeof(FP));
cudaSafeCall( cudaMalloc((void **)&pre_handle.rcv_x, pre_handle.rcv_size_x * sizeof(FP)) );
cudaSafeCall( cudaMalloc((void **)&pre_handle.rcv_y, pre_handle.rcv_size_y * sizeof(FP)) );
cudaSafeCall( cudaMalloc((void **)&pre_handle.rcv_z, pre_handle.rcv_size_z * sizeof(FP)) );
return 0;
}
// Free memory used
void finalize(app_handle &app, preproc_handle<FP> &pre_handle) {
free(pre_handle.halo_snd_x);
free(pre_handle.halo_rcv_x);
free(pre_handle.halo_snd_y);
free(pre_handle.halo_rcv_y);
free(pre_handle.halo_snd_z);
free(pre_handle.halo_rcv_z);
cudaSafeCall( cudaFree(pre_handle.rcv_x) );
cudaSafeCall( cudaFree(pre_handle.rcv_y) );
cudaSafeCall( cudaFree(pre_handle.rcv_z) );
cudaSafeCall( cudaFree(app.a) );
cudaSafeCall( cudaFree(app.b) );
cudaSafeCall( cudaFree(app.c) );
cudaSafeCall( cudaFree(app.d) );
cudaSafeCall( cudaFree(app.u) );
free(app.size_g);
free(app.size);
free(app.start_g);
free(app.end_g);
free(app.pdims);
free(app.coords);
delete app.params;
}
int main(int argc, char* argv[]) {
app_handle app;
preproc_handle<FP> pre_handle;
int iter;
// Initialize
init(app, pre_handle, iter, argc, argv);
// Declare and reset elapsed time counters
double timer = 0.0;
double timer1 = 0.0;
double elapsed_total = 0.0;
double elapsed_preproc = 0.0;
double elapsed_trid_x = 0.0;
double elapsed_trid_y = 0.0;
double elapsed_trid_z = 0.0;
timing_start(&timer1);
// Allocate memory used in sums of distributed arrays
FP *h_u = (FP *) malloc(sizeof(FP) * app.size[0] * app.size[1] * app.size[2]);
FP *du = (FP *) malloc(sizeof(FP) * app.size[0] * app.size[1] * app.size[2]);
TridParams trid_params;
trid_params.mpi_params = (void *)app.params;
// Iterate over specified number of time steps
for(int it = 0; it < iter; it++) {
// Preprocess
timing_start(&timer);
preproc_mpi_cuda<FP>(pre_handle, app);
timing_end(&timer, &elapsed_preproc);
cudaSafeCall( cudaDeviceSynchronize() );
//
// perform tri-diagonal solves in x-direction
//
timing_start(&timer);
#if FPPREC == 0
tridSmtsvStridedBatch(&trid_params, app.a, app.b, app.c, app.d, 3, 0, app.size, app.size);
#else
tridDmtsvStridedBatch(&trid_params, app.a, app.b, app.c, app.d, 3, 0, app.size, app.size);
#endif
timing_end(&timer, &elapsed_trid_x);
//
// perform tri-diagonal solves in y-direction
//
timing_start(&timer);
#if FPPREC == 0
tridSmtsvStridedBatch(&trid_params, app.a, app.b, app.c, app.d, 3, 1, app.size, app.size);
#else
tridDmtsvStridedBatch(&trid_params, app.a, app.b, app.c, app.d, 3, 1, app.size, app.size);
#endif
timing_end(&timer, &elapsed_trid_y);
//
// perform tri-diagonal solves in z-direction
//
timing_start(&timer);
#if FPPREC == 0
tridSmtsvStridedBatchInc(&trid_params, app.a, app.b, app.c, app.d, app.u, 3, 2, app.size, app.size);
#else
tridDmtsvStridedBatchInc(&trid_params, app.a, app.b, app.c, app.d, app.u, 3, 2, app.size, app.size);
#endif
timing_end(&timer, &elapsed_trid_z);
}
timing_end(&timer1, &elapsed_total);
// Print sum of these arrays (basic error checking)
cudaSafeCall( cudaMemcpy(h_u, app.u, sizeof(FP) * app.size[0] * app.size[1] * app.size[2], cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(du, app.d, sizeof(FP) * app.size[0] * app.size[1] * app.size[2], cudaMemcpyDeviceToHost) );
rms("end h", h_u, app);
rms("end d", du, app);
MPI_Barrier(MPI_COMM_WORLD);
free(h_u);
free(du);
MPI_Barrier(MPI_COMM_WORLD);
// Print out timings of each section
if(app.coords[0] == 0 && app.coords[1] == 0 && app.coords[2] == 0) {
// Print execution times
printf("Time per section: \n[total] \t[prepro] \t[trid_x] \t[trid_y] \t[trid_z]\n");
printf("%e \t%e \t%e \t%e \t%e\n",
elapsed_total,
elapsed_preproc,
elapsed_trid_x,
elapsed_trid_y,
elapsed_trid_z);
}
MPI_Barrier(MPI_COMM_WORLD);
// Free memory
finalize(app, pre_handle);
MPI_Finalize();
cudaDeviceReset();
return 0;
}
|
378a0191ba52fba6699a062e267d643397cbddb0.hip | // !!! This is a file automatically generated by hipify!!!
#include <cinttypes>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#define BLOCK_SIZE 32
#define GPU_NITER 100
#define MAT_SIZE_X 10000
#define MAT_SIZE_Y 10000
// Macro that wraps CUDA runtime error handling
#define CHECK(func) \
{ \
const hipError_t error = func; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("Code:%d, Reason: %s\n", error, \
hipGetErrorString(error)); \
hipDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
double cpu_second(void) {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
double gflops(double sec, uint32_t mat_size_x, uint32_t mat_size_y) {
double operations = mat_size_x * mat_size_y;
double gflops = operations * 1.0e-9f / sec;
return gflops;
}
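// Example (illustrative): a 10000 x 10000 element-wise add is 1.0e8 operations, so a
// kernel time of 0.01 s corresponds to 1.0e8 * 1.0e-9 / 0.01 = 10 GFLOPS.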
// Kernel that performs matrix addition on CUDA
__global__ void add_matrix_gpu(float *dMat_A, float *dMat_B, float *dMat_G, uint32_t mat_size_x, uint32_t mat_size_y) {
uint32_t mat_x = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t mat_y = threadIdx.y + blockIdx.y * blockDim.y;
if (mat_x >= mat_size_x) {
return;
}
if (mat_y >= mat_size_y) {
return;
}
uint32_t index = mat_y * mat_size_x + mat_x;
dMat_G[index] = dMat_A[index] + dMat_B[index];
}
// Host-side routine that runs the computation on the GPU
void invoke_gpu(const float* __restrict__ hMat_A, const float* __restrict__ hMat_B, float *hMat_G, uint32_t mat_size_x, uint32_t mat_size_y) {
float *dMat_A = NULL;
float *dMat_B = NULL;
float *dMat_G = NULL;
int nBytes = sizeof(float) * mat_size_x * mat_size_y;
// Allocate memory on the GPU (device)
CHECK(hipMalloc((float **) &dMat_A, nBytes));
CHECK(hipMalloc((float **) &dMat_B, nBytes));
CHECK(hipMalloc((float **) &dMat_G, nBytes));
// Copy the host-side buffers to the GPU (device)
CHECK(hipMemcpy(dMat_A, hMat_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(dMat_B, hMat_B, nBytes, hipMemcpyHostToDevice));
// Compute the grid / block dimensions used to partition the work
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((mat_size_x + block.x - 1) / block.x, (mat_size_y + block.y - 1) / block.y);
printf("Grid = (%d, %d), Block = (%d, %d)\n", grid.x, grid.y, block.x, block.y);
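// Example (illustrative): with mat_size_x = mat_size_y = 10000 and BLOCK_SIZE = 32,
// grid = (313, 313); the rounded-up 313 * 32 = 10016 threads per dimension include 16
// extra threads that the bounds checks in the kernel simply return from.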
// Measure the performance of the GPU computation
double start_sec = cpu_second();
for (int i = 0; i < GPU_NITER; i++) {
hipLaunchKernelGGL(( add_matrix_gpu), dim3(grid), dim3(block), 0, 0, dMat_A, dMat_B, dMat_G, mat_size_x, mat_size_y);
}
CHECK(hipDeviceSynchronize());
double elapsed_sec = (cpu_second() - start_sec) / GPU_NITER;
// Copy the result back to the host
CHECK(hipMemcpy(hMat_G, dMat_G, nBytes, hipMemcpyDeviceToHost));
// Print only the first 10 results
for (uint32_t i = 0; i < mat_size_x * mat_size_y; i++) {
if (i < 10) {
printf("A[%d]=%8.4f, B[%d]=%8.4f, G[%d]=%8.4f\n", i, hMat_A[i], i, hMat_B[i], i, hMat_G[i]);
}
}
printf("GPU: Time elapsed %lf sec (%lf GFLOPS)\n", elapsed_sec, gflops(elapsed_sec, mat_size_x, mat_size_y));
CHECK(hipFree(dMat_A));
CHECK(hipFree(dMat_B));
CHECK(hipFree(dMat_G));
CHECK(hipDeviceReset());
}
// Matrix addition on the CPU (single-threaded)
void add_matrix_cpu(float *hMat_A, float *hMat_B, float *hMat_C, uint32_t mat_size_x, uint32_t mat_size_y) {
for (uint32_t y = 0; y < mat_size_y; y++) {
for (uint32_t x = 0; x < mat_size_x; x++) {
uint32_t index = y * mat_size_x + x;
hMat_C[index] = hMat_A[index] + hMat_B[index];
}
}
}
// Run the computation on the CPU
void invoke_cpu(float *hMat_A, float *hMat_B, float *hMat_C, uint32_t mat_size_x, uint32_t mat_size_y) {
double start_sec = cpu_second();
for (int i = 0; i < GPU_NITER; i++) {
add_matrix_cpu(hMat_A, hMat_B, hMat_C, mat_size_x, mat_size_y);
}
double elapsed_sec = (cpu_second() - start_sec) / GPU_NITER;
// Print only the first 10 results
for (uint32_t i = 0; i < mat_size_x * mat_size_y; i++) {
if (i < 10) {
printf("A[%d]=%8.4f, B[%d]=%8.4f, G[%d]=%8.4f\n", i, hMat_A[i], i, hMat_B[i], i, hMat_C[i]);
}
}
printf("CPU: Time elapsed %lf sec (%lf GFLOPS)\n", elapsed_sec, gflops(elapsed_sec, mat_size_x, mat_size_y));
}
int main(void)
{
uint32_t mat_size_x = MAT_SIZE_X;
uint32_t mat_size_y = MAT_SIZE_Y;
int nBytes = sizeof(float) * mat_size_x * mat_size_y;
float *hMat_A;
float *hMat_B;
float *hMat_G;
// Allocate memory for matrices A and B and the result matrix G
hMat_A = (float *) malloc(nBytes);
hMat_B = (float *) malloc(nBytes);
hMat_G = (float *) malloc(nBytes);
// Initialize matrices A and B with random values
time_t t;
srand((unsigned int) time(&t));
for (uint32_t i = 0; i < mat_size_x * mat_size_y; i++) {
hMat_A[i] = (float)(rand() % 100000) / 10000.0f;
hMat_B[i] = (float)(rand() % 100000) / 10000.0f;
}
invoke_gpu(hMat_A, hMat_B, hMat_G, mat_size_x, mat_size_y);
invoke_cpu(hMat_A, hMat_B, hMat_G, mat_size_x, mat_size_y);
// Free the allocated memory
free(hMat_A);
free(hMat_B);
free(hMat_G);
}
| 378a0191ba52fba6699a062e267d643397cbddb0.cu | #include <cinttypes>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define BLOCK_SIZE 32
#define GPU_NITER 100
#define MAT_SIZE_X 10000
#define MAT_SIZE_Y 10000
// Macro that wraps CUDA runtime error handling
#define CHECK(func) \
{ \
const cudaError_t error = func; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("Code:%d, Reason: %s\n", error, \
cudaGetErrorString(error)); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
double cpu_second(void) {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1.e-6);
}
double gflops(double sec, uint32_t mat_size_x, uint32_t mat_size_y) {
double operations = mat_size_x * mat_size_y;
double gflops = operations * 1.0e-9f / sec;
return gflops;
}
// Kernel that performs matrix addition on CUDA
__global__ void add_matrix_gpu(float *dMat_A, float *dMat_B, float *dMat_G, uint32_t mat_size_x, uint32_t mat_size_y) {
uint32_t mat_x = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t mat_y = threadIdx.y + blockIdx.y * blockDim.y;
if (mat_x >= mat_size_x) {
return;
}
if (mat_y >= mat_size_y) {
return;
}
uint32_t index = mat_y * mat_size_x + mat_x;
dMat_G[index] = dMat_A[index] + dMat_B[index];
}
// Host-side routine that runs the computation on the GPU
void invoke_gpu(const float* __restrict__ hMat_A, const float* __restrict__ hMat_B, float *hMat_G, uint32_t mat_size_x, uint32_t mat_size_y) {
float *dMat_A = NULL;
float *dMat_B = NULL;
float *dMat_G = NULL;
int nBytes = sizeof(float) * mat_size_x * mat_size_y;
// Allocate memory on the GPU (device)
CHECK(cudaMalloc((float **) &dMat_A, nBytes));
CHECK(cudaMalloc((float **) &dMat_B, nBytes));
CHECK(cudaMalloc((float **) &dMat_G, nBytes));
// Copy the host-side buffers to the GPU (device)
CHECK(cudaMemcpy(dMat_A, hMat_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dMat_B, hMat_B, nBytes, cudaMemcpyHostToDevice));
// Compute the grid / block dimensions used to partition the work
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((mat_size_x + block.x - 1) / block.x, (mat_size_y + block.y - 1) / block.y);
printf("Grid = (%d, %d), Block = (%d, %d)\n", grid.x, grid.y, block.x, block.y);
// Measure the performance of the GPU computation
double start_sec = cpu_second();
for (int i = 0; i < GPU_NITER; i++) {
add_matrix_gpu<<<grid, block>>> (dMat_A, dMat_B, dMat_G, mat_size_x, mat_size_y);
}
CHECK(cudaDeviceSynchronize());
double elapsed_sec = (cpu_second() - start_sec) / GPU_NITER;
// Copy the result back to the host
CHECK(cudaMemcpy(hMat_G, dMat_G, nBytes, cudaMemcpyDeviceToHost));
// Print only the first 10 results
for (uint32_t i = 0; i < mat_size_x * mat_size_y; i++) {
if (i < 10) {
printf("A[%d]=%8.4f, B[%d]=%8.4f, G[%d]=%8.4f\n", i, hMat_A[i], i, hMat_B[i], i, hMat_G[i]);
}
}
printf("GPU: Time elapsed %lf sec (%lf GFLOPS)\n", elapsed_sec, gflops(elapsed_sec, mat_size_x, mat_size_y));
CHECK(cudaFree(dMat_A));
CHECK(cudaFree(dMat_B));
CHECK(cudaFree(dMat_G));
CHECK(cudaDeviceReset());
}
// Matrix addition on the CPU (single-threaded)
void add_matrix_cpu(float *hMat_A, float *hMat_B, float *hMat_C, uint32_t mat_size_x, uint32_t mat_size_y) {
for (uint32_t y = 0; y < mat_size_y; y++) {
for (uint32_t x = 0; x < mat_size_x; x++) {
uint32_t index = y * mat_size_x + x;
hMat_C[index] = hMat_A[index] + hMat_B[index];
}
}
}
// Run the computation on the CPU
void invoke_cpu(float *hMat_A, float *hMat_B, float *hMat_C, uint32_t mat_size_x, uint32_t mat_size_y) {
double start_sec = cpu_second();
for (int i = 0; i < GPU_NITER; i++) {
add_matrix_cpu(hMat_A, hMat_B, hMat_C, mat_size_x, mat_size_y);
}
double elapsed_sec = (cpu_second() - start_sec) / GPU_NITER;
// Print only the first 10 results
for (uint32_t i = 0; i < mat_size_x * mat_size_y; i++) {
if (i < 10) {
printf("A[%d]=%8.4f, B[%d]=%8.4f, G[%d]=%8.4f\n", i, hMat_A[i], i, hMat_B[i], i, hMat_C[i]);
}
}
printf("CPU: Time elapsed %lf sec (%lf GFLOPS)\n", elapsed_sec, gflops(elapsed_sec, mat_size_x, mat_size_y));
}
int main(void)
{
uint32_t mat_size_x = MAT_SIZE_X;
uint32_t mat_size_y = MAT_SIZE_Y;
int nBytes = sizeof(float) * mat_size_x * mat_size_y;
float *hMat_A;
float *hMat_B;
float *hMat_G;
// Allocate memory for matrices A and B and the result matrix G
hMat_A = (float *) malloc(nBytes);
hMat_B = (float *) malloc(nBytes);
hMat_G = (float *) malloc(nBytes);
// Initialize matrices A and B with random values
time_t t;
srand((unsigned int) time(&t));
for (uint32_t i = 0; i < mat_size_x * mat_size_y; i++) {
hMat_A[i] = (float)(rand() % 100000) / 10000.0f;
hMat_B[i] = (float)(rand() % 100000) / 10000.0f;
}
invoke_gpu(hMat_A, hMat_B, hMat_G, mat_size_x, mat_size_y);
invoke_cpu(hMat_A, hMat_B, hMat_G, mat_size_x, mat_size_y);
// Free the allocated memory
free(hMat_A);
free(hMat_B);
free(hMat_G);
}
|
94f44f0bdb004b35ca20dfeede1934875d39d0f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdint.h>
#include "md5.cu"
__device__ int best_prefix = 6;
__device__ int best_suffix = 6;
__device__ int pow(int x, int y) {
int res = 1;
for(int i = 0; i < y; i++) {
res *= x;
}
return res;
}
__device__ char* itoa(int i) {
int len = 1, temp = i;
while ((temp /= 10) > 0) {
len++;
}
char* a = (char*)malloc(sizeof(char)*(32 + 1));
for(int idx = len-1; idx >= 0; idx--) {
temp = i / pow(10, idx) % 10;
a[len-idx-1] = temp + '0';
}
a[len] = '\0';
return a;
}
__device__ int strlen(char* str) {
int len;
for(len = 0; str[len] != '\0'; len++) { }
return len;
}
__device__ void hexify(const unsigned char* input, char* output) {
const char* map = "0123456789abcdef";
for(int i = 0; i < 16; i++) {
output[i*2] = map[(input[i] & 0xF0) >> 4];
output[i*2+1] = map[(input[i] & 0x0F)];
}
output[32] = '\0';
}
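// Example (illustrative): hexify() turns the 16-byte digest into a 32-character
// lowercase hex string, e.g. the byte 0xAB becomes the characters 'a' then 'b'.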
__global__ void brute(int threads, int* randNums) {
int seed = randNums[(blockIdx.x*threads)+threadIdx.x];
char* seed_buffer = itoa(seed);
unsigned char seed_md5[16];
md5(seed_buffer, strlen(seed_buffer), seed_md5);
char buffer[33];
hexify(seed_md5, buffer);
while (1) {
char* old_buffer = (char*)malloc(sizeof(char)*33);
memcpy(old_buffer, buffer, 33);
md5(buffer, strlen(buffer), seed_md5);
hexify(seed_md5, buffer);
for (int i = 0; i < 32; i++){
if (old_buffer[i] != buffer[i]) {
if (i > best_prefix) {
printf("[%d->%d] new best prefix match: %d characters\n", blockIdx.x, threadIdx.x, i);
printf("[%d->%d] %s -> %s\n", blockIdx.x, threadIdx.x, old_buffer, buffer);
best_prefix = i;
}
break;
}
}
for (int i = 32-1; i > 0; i--){
if (old_buffer[i] != buffer[i]) {
if (32-i > best_suffix) {
printf("[%d->%d] new best suffix match: %d characters\n", blockIdx.x, threadIdx.x, 32-i-1);
printf("[%d->%d] %s -> %s\n", blockIdx.x, threadIdx.x, old_buffer, buffer);
best_suffix = 32-i; // store the suffix measure in the same units as the comparison above
}
break;
}
}
free(old_buffer);
}
}
int run_test(const char* name, const char* result, const char* expected) {
if (strcmp(expected, result) == 0) {
printf("TEST PASSED: %s: expected %s, got %s\n", name, expected, result);
return 1;
} else {
printf("TEST FAILED: %s: expected %s, got %s\n", name, expected, result);
return 0;
}
}
int main() {
int h_blocks = 128;
int h_threads = 256;
int* h_randNums = (int*)malloc(sizeof(int) * h_blocks * h_threads);
srand(time(0));
for (int i = 0; i < h_blocks * h_threads; i++) {
h_randNums[i] = rand();
}
int* d_randNums;
hipMalloc((void**)&d_randNums, sizeof(int)*h_blocks*h_threads);
hipMemcpy(d_randNums, h_randNums, sizeof(int)*h_blocks*h_threads, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( brute), dim3(h_blocks), dim3(h_threads), 0, 0, h_threads, d_randNums);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("CUDA Error: %s\n", hipGetErrorString(err));
}
} | 94f44f0bdb004b35ca20dfeede1934875d39d0f1.cu | #include <cuda.h>
#include <stdint.h>
#include "md5.cu"
__device__ int best_prefix = 6;
__device__ int best_suffix = 6;
__device__ int pow(int x, int y) {
int res = 1;
for(int i = 0; i < y; i++) {
res *= x;
}
return res;
}
__device__ char* itoa(int i) {
int len = 1, temp = i;
while ((temp /= 10) > 0) {
len++;
}
char* a = (char*)malloc(sizeof(char)*(32 + 1));
for(int idx = len-1; idx >= 0; idx--) {
temp = i / pow(10, idx) % 10;
a[len-idx-1] = temp + '0';
}
a[len] = '\0';
return a;
}
__device__ int strlen(char* str) {
int len;
for(len = 0; str[len] != '\0'; len++) { }
return len;
}
__device__ void hexify(const unsigned char* input, char* output) {
const char* map = "0123456789abcdef";
for(int i = 0; i < 16; i++) {
output[i*2] = map[(input[i] & 0xF0) >> 4];
output[i*2+1] = map[(input[i] & 0x0F)];
}
output[32] = '\0';
}
__global__ void brute(int threads, int* randNums) {
int seed = randNums[(blockIdx.x*threads)+threadIdx.x];
char* seed_buffer = itoa(seed);
unsigned char seed_md5[16];
md5(seed_buffer, strlen(seed_buffer), seed_md5);
char buffer[33];
hexify(seed_md5, buffer);
while (1) {
char* old_buffer = (char*)malloc(sizeof(char)*33);
memcpy(old_buffer, buffer, 33);
md5(buffer, strlen(buffer), seed_md5);
hexify(seed_md5, buffer);
for (int i = 0; i < 32; i++){
if (old_buffer[i] != buffer[i]) {
if (i > best_prefix) {
printf("[%d->%d] new best prefix match: %d characters\n", blockIdx.x, threadIdx.x, i);
printf("[%d->%d] %s -> %s\n", blockIdx.x, threadIdx.x, old_buffer, buffer);
best_prefix = i;
}
break;
}
}
for (int i = 32-1; i > 0; i--){
if (old_buffer[i] != buffer[i]) {
if (32-i > best_suffix) {
printf("[%d->%d] new best suffix match: %d characters\n", blockIdx.x, threadIdx.x, 32-i-1);
printf("[%d->%d] %s -> %s\n", blockIdx.x, threadIdx.x, old_buffer, buffer);
best_suffix = 32-i; // store the suffix measure in the same units as the comparison above
}
break;
}
}
free(old_buffer);
}
}
int run_test(const char* name, const char* result, const char* expected) {
if (strcmp(expected, result) == 0) {
printf("TEST PASSED: %s: expected %s, got %s\n", name, expected, result);
return 1;
} else {
printf("TEST FAILED: %s: expected %s, got %s\n", name, expected, result);
return 0;
}
}
int main() {
int h_blocks = 128;
int h_threads = 256;
int* h_randNums = (int*)malloc(sizeof(int) * h_blocks * h_threads);
srand(time(0));
for (int i = 0; i < h_blocks * h_threads; i++) {
h_randNums[i] = rand();
}
int* d_randNums;
cudaMalloc((void**)&d_randNums, sizeof(int)*h_blocks*h_threads);
cudaMemcpy(d_randNums, h_randNums, sizeof(int)*h_blocks*h_threads, cudaMemcpyHostToDevice);
brute<<<h_blocks, h_threads>>>(h_threads, d_randNums);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("CUDA Error: %s\n", cudaGetErrorString(err));
}
} |
970a201e030f2637859d99ab10db9e875ddae11a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
dsymv_upper.cu is nearly identical to dsymv.cu, just change names and drop MAGMA_D_CONJ.
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from magmablas/zhemv_mgpu_upper.cu, normal z -> d, Sun Nov 20 20:20:31 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
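// Illustrative sizing (not from the original source): with n = 100, block_offset = 0 and
// NB_X = 64, a host launch of ceil(n/NB_X) = 2 blocks gives block 0 rows 0-63 in full and
// block 1 rows 64-99, so its "partial" value below is (n + block_offset) % NB_X = 36.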
__global__ void
dsymv_kernel_U_mgpu(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( (partial && tx >= partial) ||
(blk == 0 && tx < block_offset) ) {
sx_blk[tx] = MAGMA_D_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1;
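// next = smallest block column index > blk that this GPU owns under the
// 1-D block-cyclic distribution (block j lives on GPU j % ngpu).
// Worked examples for ngpu=2: my_gpu_id=0, blk=3 -> next=4;
// my_gpu_id=1, blk=3 -> next=5; my_gpu_id=0, blk=4 -> next=6.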
A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=next; jj < gridDim.x; jj += ngpu) {
partial = (jj == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// block is right of diagonal, so don't need to worry about offset here
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_D_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_D_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_D_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; //MAGMA_D_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_U_mgpu
/***************************************************************************//**
Upper case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ]
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
Note beta*y is not included here; see magmablas_dsymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * ] blk=0 * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ x x * ] blk=2 blanks are not set
[ * ] blk=3
[ x x x x * ] blk=4
[ * ] blk=0
work[gpu=1] = [ x * ] blk=1
[ * ] blk=2
[ x x x * ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries right of the diagonal blocks are not accessed.
There are no blank lines; work has been set to 0 if a GPU has no data to contribute.
[ * ]
y[gpu=0] = [ * ]
[ x + x + * ]
[ * ]
[ x + x + x + x + * ]
[ * ]
y[gpu=1] = [ x + * ]
[ * ]
[ x + x + x + * ]
[ * ]
*******************************************************************************/
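// Illustrative sketch (not part of the original MAGMA source): per output
// element, the kernel below is equivalent to this serial reduction over the
// per-GPU workspace described above, assuming the same work/lda layout:
//
//     for (int ind = block_offset; ind < n + block_offset; ++ind) {
//         int blk = ind / NB_X;                               // block row owning this element
//         int first = (blk % ngpu == my_gpu_id) ? 0 : blk;    // block columns with data on this GPU
//         double Ax = MAGMA_D_ZERO;
//         for (int j = first; j <= blk; ++j)
//             Ax += work[ind + j*lda];                        // sum partial results across block columns
//         y[ind*incy] = alpha * Ax;                           // beta*y applied elsewhere (see note above)
//     }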
__global__ void
dsymv_kernel_U_mgpu_sum(
int n,
double alpha,
int lda,
double * __restrict__ y, int incy,
double const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
double Ax = MAGMA_D_ZERO;
work += ind;
// if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data;
// else only block j=blk contains data.
int first = 0;
if ( blk % ngpu != my_gpu_id ) {
first = blk;
}
for (int j = first; j <= blk; ++j) {
Ax += work[j*lda];
}
y[ind * incy] = alpha*Ax; // see magmablas_dsymv_sync for beta*y
}
}
// end dsymv_kernel_U_mgpu_sum
| 970a201e030f2637859d99ab10db9e875ddae11a.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
dsymv_mgpu_upper.cu is nearly identical to zhemv_mgpu_upper.cu; just change names and drop the conjugation (MAGMA_D_CONJ is a no-op in real precision).
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from magmablas/zhemv_mgpu_upper.cu, normal z -> d, Sun Nov 20 20:20:31 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
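// Launch-shape sketch (illustrative assumption, not the actual MAGMA driver):
// one thread block of NB_X x NB_Y = 64x4 threads per 64-row block of A, and
// gridDim.x = ceil( (n + block_offset) / NB_X ) block columns, e.g.
//
//     dim3 threads( NB_X, NB_Y );
//     dim3 grid( magma_ceildiv( n + block_offset, NB_X ) );
//     dsymv_kernel_U_mgpu<<< grid, threads >>>( n, dA, ldda, dx, incx,
//                                               dwork, my_gpu_id, ngpu, block_offset );
//
// where dA, dx, dwork, ldda are hypothetical device pointers/strides for this GPU.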
__global__ void
dsymv_kernel_U_mgpu(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
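// e.g., with NB_X = 64 and n + block_offset = 200: gridDim.x = 4 and the last
// block (blk = 3) has partial = 200 % 64 = 8 valid rows; partial = 0 means all
// NB_X rows of this block are valid.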
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( (partial && tx >= partial) ||
(blk == 0 && tx < block_offset) ) {
sx_blk[tx] = MAGMA_D_ZERO;
}
else {
sx_blk[tx] = x[0];
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
if ( blk % ngpu == my_gpu_id ) {
// this GPU owns this diagonal block, so
// move to 32x32 diag block
A += (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= (blk/ngpu)*NB_X*lda; // A is A(blk_ind + tx2, ty2)
}
// finish switching thread offset
A -= ty2*lda + tx2; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
int next = blk + (my_gpu_id + ngpu - 1 - blk % ngpu) % ngpu + 1;
A += (next/ngpu)*NB_X*lda; // A is A(blk_ind + tx, next*NB_X + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=next; jj < gridDim.x; jj += ngpu) {
partial = (jj == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
// block is right of diagonal, so don't need to worry about offset here
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_D_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_D_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; //MAGMA_D_MAKE( tx4, blk ); // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? ((n + block_offset) % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; //MAGMA_D_MAKE( tx, blk ); // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_U_mgpu
/***************************************************************************//**
Upper case, sum up partial results per GPU.
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ]
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
Note beta*y is not included here; see magmablas_dsymv_mgpu_sync.
The above workspace is distributed over multiple GPUs as diagrammed for 5 blocks:
[ * ] blk=0 * data for non-transposed row w_blk = A_{blk,1:nblock} * x_{blk:nblock}
work[gpu=0] = [ * ] blk=1 x data for transposed block w_jj = A_{blk,jj}^H * x_{blk}
[ x x * ] blk=2 blanks are not set
[ * ] blk=3
[ x x x x * ] blk=4
[ * ] blk=0
work[gpu=1] = [ x * ] blk=1
[ * ] blk=2
[ x x x * ] blk=3
[ * ] blk=4
On output, rows across are summed up.
Entries right of the diagonal blocks are not accessed.
There are no blank lines; work has been set to 0 if a GPU has no data to contribute.
[ * ]
y[gpu=0] = [ * ]
[ x + x + * ]
[ * ]
[ x + x + x + x + * ]
[ * ]
y[gpu=1] = [ x + * ]
[ * ]
[ x + x + x + * ]
[ * ]
*******************************************************************************/
__global__ void
dsymv_kernel_U_mgpu_sum(
int n,
double alpha,
int lda,
double * __restrict__ y, int incy,
double const * __restrict__ work,
int my_gpu_id,
int ngpu,
int block_offset)
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [block_offset, ..., n+block_offset)
if ( ind >= block_offset && ind < n+block_offset ) {
double Ax = MAGMA_D_ZERO;
work += ind;
// if this GPU owns block-column blk, all blocks j=[0, ..., blk] contain data;
// else only block j=blk contains data.
int first = 0;
if ( blk % ngpu != my_gpu_id ) {
first = blk;
}
for (int j = first; j <= blk; ++j) {
Ax += work[j*lda];
}
y[ind * incy] = alpha*Ax; // see magmablas_dsymv_sync for beta*y
}
}
// end dsymv_kernel_U_mgpu_sum
|
edb545c3dc30b00aadc563a4e8b265985c7f356f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/device_helper.h"
#include "Fast.h"
#if defined(SAIGA_USE_EIGEN)
namespace Saiga
{
namespace CUDA
{
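// Lookup table used by the FAST corner test in this file (carried over from
// OpenCV's CUDA FAST implementation, per the license header above); it appears
// to encode, one bit per brighter/darker pattern on the 16-pixel Bresenham
// circle, whether that pattern contains a long enough contiguous arc to
// qualify as a corner.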
__constant__ unsigned char c_table[] = {
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> x - th <= v <= x + th
__device__ __forceinline__ int diffType(const int v, const int x, const int th)
{
const int diff = x - v;
return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1);
}
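// calcMask builds two 16-bit masks over the 16 circle pixels packed into C[0..3]
// (four bytes per register): mask1 gets a bit where the circle pixel lies more than
// th below the centre value v, mask2 where it lies more than th above (see diffType).
// The early returns test antipodal pixel pairs first: for FAST-9 a contiguous arc of
// nine circle pixels must contain at least one pixel of every antipodal pair, so if
// both pixels of such a pair are similar to the centre the point cannot be a corner.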
__device__ void calcMask(const uint32_t C[4], const int v, const int th, int& mask1, int& mask2)
{
mask1 = 0;
mask2 = 0;
int d1, d2;
d1 = diffType(v, C[0] & 0xff, th);
d2 = diffType(v, C[2] & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 0;
mask2 |= ((d1 & 2) >> 1) << 0;
mask1 |= (d2 & 1) << 8;
mask2 |= ((d2 & 2) >> 1) << 8;
d1 = diffType(v, C[1] & 0xff, th);
d2 = diffType(v, C[3] & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 4;
mask2 |= ((d1 & 2) >> 1) << 4;
mask1 |= (d2 & 1) << 12;
mask2 |= ((d2 & 2) >> 1) << 12;
d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 2;
mask2 |= ((d1 & 2) >> 1) << 2;
mask1 |= (d2 & 1) << 10;
mask2 |= ((d2 & 2) >> 1) << 10;
d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 6;
mask2 |= ((d1 & 2) >> 1) << 6;
mask1 |= (d2 & 1) << 14;
mask2 |= ((d2 & 2) >> 1) << 14;
d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 1;
mask2 |= ((d1 & 2) >> 1) << 1;
mask1 |= (d2 & 1) << 9;
mask2 |= ((d2 & 2) >> 1) << 9;
d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 3;
mask2 |= ((d1 & 2) >> 1) << 3;
mask1 |= (d2 & 1) << 11;
mask2 |= ((d2 & 2) >> 1) << 11;
d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 5;
mask2 |= ((d1 & 2) >> 1) << 5;
mask1 |= (d2 & 1) << 13;
mask2 |= ((d2 & 2) >> 1) << 13;
d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th);
mask1 |= (d1 & 1) << 7;
mask2 |= ((d1 & 2) >> 1) << 7;
mask1 |= (d2 & 1) << 15;
mask2 |= ((d2 & 2) >> 1) << 15;
}
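// isKeyPoint: a pixel passes the FAST test if more than 8 of the 16 circle pixels are
// consistently darker (mask1) or consistently brighter (mask2) than the centre and the
// precomputed c_table bit for that mask is set (the table appears to encode whether the
// set bits form a long enough contiguous arc).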
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> not a keypoint
__device__ __forceinline__ bool isKeyPoint(int mask1, int mask2)
{
return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) ||
(__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7))));
}
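// cornerScore: binary-searches the largest threshold in [threshold + 1, 255] for which
// the pixel still passes the keypoint test and returns it as the corner response used
// for non-maximum suppression.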
__device__ int cornerScore(const uint32_t C[4], const int v, const int threshold)
{
// binary search in [threshold + 1, 255]
int min = threshold + 1;
int max = 255;
while (min <= max)
{
const int mid = (min + max) >> 1;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, mid, mask1, mask2);
int isKp = static_cast<int>(isKeyPoint(mask1, mask2));
min = isKp * (mid + 1) + (isKp ^ 1) * min;
max = (isKp ^ 1) * (mid - 1) + isKp * max;
}
return min - 1;
}
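// isKeyPoint2 gathers the 16 Bresenham-circle pixels around (i, j) (i = row, j = column
// as used by the callers below) into four packed 32-bit registers. It rejects early if
// the left and right circle pixels are both similar to the centre, otherwise it runs the
// full mask test and returns the corner score, or 0 if the pixel is not a keypoint.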
__device__ int isKeyPoint2(Saiga::ImageView<unsigned char> img, const int i, const int j, const int threshold)
{
int v;
uint32_t C[4] = {0, 0, 0, 0};
C[2] |= static_cast<uint32_t>(img(i - 3, j - 1)) << 8;
C[2] |= static_cast<uint32_t>(img(i - 3, j));
C[1] |= static_cast<uint32_t>(img(i - 3, j + 1)) << (3 * 8);
C[2] |= static_cast<uint32_t>(img(i - 2, j - 2)) << (2 * 8);
C[1] |= static_cast<uint32_t>(img(i - 2, j + 2)) << (2 * 8);
C[2] |= static_cast<uint32_t>(img(i - 1, j - 3)) << (3 * 8);
C[1] |= static_cast<uint32_t>(img(i - 1, j + 3)) << 8;
C[3] |= static_cast<uint32_t>(img(i, j - 3));
v = static_cast<int>(img(i, j));
C[1] |= static_cast<uint32_t>(img(i, j + 3));
int d1 = diffType(v, C[1] & 0xff, threshold);
int d2 = diffType(v, C[3] & 0xff, threshold);
if ((d1 | d2) == 0)
{
return 0;
}
C[3] |= static_cast<uint32_t>(img(i + 1, j - 3)) << 8;
C[0] |= static_cast<uint32_t>(img(i + 1, j + 3)) << (3 * 8);
C[3] |= static_cast<uint32_t>(img(i + 2, j - 2)) << (2 * 8);
C[0] |= static_cast<uint32_t>(img(i + 2, j + 2)) << (2 * 8);
C[3] |= static_cast<uint32_t>(img(i + 3, j - 1)) << (3 * 8);
C[0] |= static_cast<uint32_t>(img(i + 3, j));
C[0] |= static_cast<uint32_t>(img(i + 3, j + 1)) << 8;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, threshold, mask1, mask2);
if (isKeyPoint(mask1, mask2))
{
return cornerScore(C, v, threshold);
}
return 0;
}
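// isMax: 3x3 non-maximum suppression on the local score image; true only if the score
// at loc is strictly greater than all eight neighbours.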
__device__ bool isMax(int2 loc, Saiga::ImageView<int> scoreMat)
{
int score = scoreMat(loc.y, loc.x);
bool ismax = score > scoreMat(loc.y - 1, loc.x - 1) && score > scoreMat(loc.y - 1, loc.x) &&
score > scoreMat(loc.y - 1, loc.x + 1) &&
score > scoreMat(loc.y, loc.x - 1) && score > scoreMat(loc.y, loc.x + 1) &&
score > scoreMat(loc.y + 1, loc.x - 1) && score > scoreMat(loc.y + 1, loc.x) &&
score > scoreMat(loc.y + 1, loc.x + 1);
return ismax;
}
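// tileCalcKeypoints_kernel: one block per TILE_SIZE_X x TILE_SIZE_Y tile. The tile plus a
// 4-pixel apron is staged into shared memory with 4-byte loads (coordinates clamped to the
// image border), FAST scores are computed at highThreshold and non-maximum suppressed, and
// surviving corners are appended to kpLoc/kpScore through a per-tile and a global atomic
// counter. Only if the tile produced no keypoint at the high threshold is the scoring pass
// repeated with lowThreshold. At most max_kps_high / max_kps_low corners are kept per tile.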
template <int TILE_SIZE_X, int TILE_SIZE_Y>
__global__ void tileCalcKeypoints_kernel(Saiga::ImageView<unsigned char> img_, short2* kpLoc, float* kpScore,
const unsigned int maxKeypoints, const int highThreshold,
const int lowThreshold, unsigned int* counter_ptr)
{
int max_kps_high = 50;
int max_kps_low = 50;
const int required_border = 4;
const int local_image_w = TILE_SIZE_X + 2 * required_border;
const int local_image_h = TILE_SIZE_Y + 2 * required_border;
static_assert(local_image_w % 4 == 0, "local_image_w must be a multiple of 4");
static_assert(local_image_h % 4 == 0, "local_image_h must be a multiple of 4");
CUDA_ASSERT(img_.pitchBytes % 4 == 0);
__shared__ int local_image_i[local_image_h][local_image_w / 4];
__shared__ int local_score[local_image_h][local_image_w];
__shared__ unsigned int num_kps;
const int2 global_inner_start = {int(blockIdx.x * blockDim.x), int((blockIdx.y * blockDim.y) * 4)};
const int2 global_outer_start = {global_inner_start.x - 4, global_inner_start.y - 4};
const int block_start_x = blockIdx.x * blockDim.x;
const int block_start_y = (blockIdx.y * blockDim.y) * 4;
const int linear_local_tid = threadIdx.y * blockDim.x + threadIdx.x;
for (int t = linear_local_tid; t < (local_image_w / 4) * local_image_h; t += blockDim.x * blockDim.y)
{
int local_x = t % (local_image_w / 4);
int local_y = t / (local_image_w / 4);
int x = global_outer_start.x + local_x * 4;
int y = global_outer_start.y + local_y;
CUDA_ASSERT(x % 4 == 0);
// clamp to border is better than conditional reads
x = max(0, min(x, (int)img_.pitchBytes - 4));
y = max(0, min(y, img_.rows - 1));
CUDA_ASSERT(x % 4 == 0);
reinterpret_cast<int*>(&local_image_i[local_y][local_x])[0] = reinterpret_cast<const int*>(&img_(y, x))[0];
}
__syncthreads();
Saiga::ImageView<unsigned char> img;
img.w = local_image_w;
img.h = local_image_h;
img.pitchBytes = local_image_w;
img.dataT = reinterpret_cast<unsigned char*>(&local_image_i[0][0]);
Saiga::ImageView<int> scoreMat;
scoreMat.w = local_image_w;
scoreMat.h = local_image_h;
scoreMat.pitchBytes = local_image_w * 4;
scoreMat.dataT = reinterpret_cast<int*>(&local_score[0][0]);
if (linear_local_tid == 0)
{
num_kps = 0;
}
// compute score
for (int t = linear_local_tid; t < (TILE_SIZE_X + 2) * (TILE_SIZE_Y + 2); t += blockDim.x * blockDim.y)
{
int local_x = t % (TILE_SIZE_X + 2);
int local_y = t / (TILE_SIZE_X + 2);
int x = local_x + 3;
int y = local_y + 3;
scoreMat(y, x) = isKeyPoint2(img, y, x, highThreshold);
}
__syncthreads();
for (int t = 0; t < 4; ++t)
{
int inner_x = threadIdx.x;
int inner_y = threadIdx.y + t * 8;
int x = inner_x + 4;
int y = inner_y + 4;
int global_x = inner_x + global_inner_start.x;
int global_y = inner_y + global_inner_start.y;
int score = scoreMat(y, x);
if (score == 0) continue;
if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4)
{
continue;
}
if (isMax(make_int2(x, y), scoreMat))
{
auto local_index = atomicInc(&num_kps, (unsigned int)(-1));
if (local_index < max_kps_high)
{
auto global_index = atomicInc(counter_ptr, (unsigned int)(-1));
if (global_index < maxKeypoints)
{
short2 loc;
loc.x = global_x;
loc.y = global_y;
kpLoc[global_index] = loc;
kpScore[global_index] = static_cast<float>(score);
}
}
}
}
__syncthreads();
if (num_kps > 0) return;
// compute score
for (int t = linear_local_tid; t < (TILE_SIZE_X + 2) * (TILE_SIZE_Y + 2); t += blockDim.x * blockDim.y)
{
int local_x = t % (TILE_SIZE_X + 2);
int local_y = t / (TILE_SIZE_X + 2);  // row stride of the scored region is (TILE_SIZE_X + 2)
int x = local_x + 3;
int y = local_y + 3;
bool in_bounds = (block_start_y + y < img_.rows - 3) && (block_start_x + x < img_.cols - 3);
scoreMat(y, x) = in_bounds * isKeyPoint2(img, y, x, lowThreshold);
}
__syncthreads();
for (int t = 0; t < 4; ++t)
{
int inner_x = threadIdx.x;
int inner_y = threadIdx.y + t * 8;
int x = inner_x + 4;
int y = inner_y + 4;
int global_x = inner_x + global_inner_start.x;
int global_y = inner_y + global_inner_start.y;
int score = scoreMat(y, x);
if (score == 0) continue;
if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4)
{
continue;
}
if (isMax(make_int2(x, y), scoreMat))
{
auto local_index = atomicInc(&num_kps, (unsigned int)(-1));
if (local_index < max_kps_low)
{
auto global_index = atomicInc(counter_ptr, (unsigned int)(-1));
if (global_index < maxKeypoints)
{
short2 loc;
loc.x = global_x;
loc.y = global_y;
kpLoc[global_index] = loc;
kpScore[global_index] = static_cast<float>(score);
}
}
}
}
}
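// createKps: one thread per keypoint; converts the packed short2 locations and float scores
// produced by the detector into Saiga::KeyPoint objects on the device.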
__global__ void createKps(Saiga::ArrayView<Saiga::KeyPoint<float>> kps, short2* kpLoc, float* kpScore)
{
Saiga::CUDA::ThreadInfo<> ti;
int i = ti.thread_id;
if (i >= kps.size())
{
return;
}
kps[i] = Saiga::KeyPoint<float>(kpLoc[i].x, kpLoc[i].y, 7, -1, kpScore[i]);
}
Fast::Fast(int highThreshold, int lowThreshold, int maxKeypoints)
: highThreshold(highThreshold), lowThreshold(lowThreshold), maxKeypoints(maxKeypoints)
{
counter_keypoint_location.resize(maxKeypoints + 1);
keypoint_score.resize(maxKeypoints);
h_counter_keypoint_location.resize(maxKeypoints + 1);
h_keypoint_score.resize(maxKeypoints);
}
Fast::~Fast() {}
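// Detect: resets the device keypoint counter, launches tileCalcKeypoints_kernel with 32x8
// threads per 32x32 tile, then asynchronously copies the counter, the first
// actual_max_keypoints locations and the matching scores back to the host buffers and
// records detection_finished on the stream. Call Download() afterwards to obtain the results.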
void Fast::Detect(Saiga::ImageView<unsigned char> d_image, hipStream_t stream)
{
auto h_counter = (unsigned int*)h_counter_keypoint_location.data();
auto d_counter = (unsigned int*)counter_keypoint_location.data().get();
auto keypoint_location = counter_keypoint_location.data().get() + 1;
{
CHECK_CUDA_ERROR(hipMemsetAsync(d_counter, 0, sizeof(unsigned int), stream));
dim3 dimBlock(32, 8);
dim3 dimGrid(Saiga::iDivUp(d_image.cols, 32), Saiga::iDivUp(d_image.rows, 32));
hipLaunchKernelGGL(( tileCalcKeypoints_kernel<32, 32>), dim3(dimGrid), dim3(dimBlock), 0, stream, d_image, keypoint_location,
keypoint_score.data().get(), maxKeypoints,
highThreshold, lowThreshold, d_counter);
CHECK_CUDA_ERROR(hipMemcpyAsync(h_counter_keypoint_location.data(), counter_keypoint_location.data().get(),
sizeof(short2) * (actual_max_keypoints + 1), hipMemcpyDeviceToHost, stream));
CHECK_CUDA_ERROR(hipMemcpyAsync(h_keypoint_score.data(), keypoint_score.data().get(),
sizeof(float) * actual_max_keypoints, hipMemcpyDeviceToHost, stream));
detection_finished.record(stream);
}
}
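// Download: waits for detection_finished, reads the keypoint count and, if more corners were
// found than the actual_max_keypoints speculatively copied in Detect(), fetches the remainder
// and grows that budget by ~5% for the next call. The packed results are then converted to
// Saiga::KeyPoint on the host; returns the number of keypoints written.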
int Fast::Download(Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints, hipStream_t stream)
{
detection_finished.synchronize();
auto h_counter = (unsigned int*)h_counter_keypoint_location.data();
auto keypoint_location = counter_keypoint_location.data().get() + 1;
auto count = h_counter[0];
if (count > actual_max_keypoints)
{
auto remaining_points = count - actual_max_keypoints;
CHECK_CUDA_ERROR(hipMemcpyAsync(h_counter_keypoint_location.data() + actual_max_keypoints + 1,
keypoint_location + actual_max_keypoints, sizeof(short2) * remaining_points,
hipMemcpyDeviceToHost, stream));
CHECK_CUDA_ERROR(hipMemcpyAsync(h_keypoint_score.data() + actual_max_keypoints,
keypoint_score.data().get() + actual_max_keypoints,
sizeof(float) * remaining_points, hipMemcpyDeviceToHost, stream));
actual_max_keypoints = count * 1.05;
CHECK_CUDA_ERROR(hipStreamSynchronize(stream));
}
SAIGA_ASSERT(keypoints.size() >= count);
for (int i = 0; i < count; ++i)
{
Saiga::KeyPoint<float> kp(h_counter_keypoint_location[i + 1].x, h_counter_keypoint_location[i + 1].y, 0, -1,
h_keypoint_score[i]);
keypoints[i] = kp;
}
return count;
}
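// A minimal usage sketch, kept as a comment. The image, stream, threshold values and the
// ArrayView construction from a std::vector are illustrative assumptions, not part of this file:
//
//   Saiga::CUDA::Fast fast(20, 7, 10000);          // high threshold, low threshold, capacity
//   fast.Detect(d_gray, stream);                   // d_gray: device ImageView<unsigned char>
//   std::vector<Saiga::KeyPoint<float>> kps(10000);
//   int n = fast.Download(Saiga::ArrayView<Saiga::KeyPoint<float>>(kps), stream);
//   kps.resize(n);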
} // namespace CUDA
} // namespace Saiga
#endif | edb545c3dc30b00aadc563a4e8b265985c7f356f.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/device_helper.h"
#include "Fast.h"
#if defined(SAIGA_USE_EIGEN)
namespace Saiga
{
namespace CUDA
{
__constant__ unsigned char c_table[] = {
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> x - th <= v <= x + th
__device__ __forceinline__ int diffType(const int v, const int x, const int th)
{
const int diff = x - v;
return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1);
}
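// Worked example (arbitrary values, for illustration only):
//   diffType(v = 140, x = 120, th = 10): diff = -20 < -th -> returns 1 (v > x + th)
//   diffType(v = 100, x = 120, th = 10): diff =  20 >  th -> returns 2 (v < x - th)
//   diffType(v = 125, x = 120, th = 10): |diff| <= th     -> returns 0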
__device__ void calcMask(const uint32_t C[4], const int v, const int th, int& mask1, int& mask2)
{
mask1 = 0;
mask2 = 0;
int d1, d2;
d1 = diffType(v, C[0] & 0xff, th);
d2 = diffType(v, C[2] & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 0;
mask2 |= ((d1 & 2) >> 1) << 0;
mask1 |= (d2 & 1) << 8;
mask2 |= ((d2 & 2) >> 1) << 8;
d1 = diffType(v, C[1] & 0xff, th);
d2 = diffType(v, C[3] & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 4;
mask2 |= ((d1 & 2) >> 1) << 4;
mask1 |= (d2 & 1) << 12;
mask2 |= ((d2 & 2) >> 1) << 12;
d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 2;
mask2 |= ((d1 & 2) >> 1) << 2;
mask1 |= (d2 & 1) << 10;
mask2 |= ((d2 & 2) >> 1) << 10;
d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 6;
mask2 |= ((d1 & 2) >> 1) << 6;
mask1 |= (d2 & 1) << 14;
mask2 |= ((d2 & 2) >> 1) << 14;
d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 1;
mask2 |= ((d1 & 2) >> 1) << 1;
mask1 |= (d2 & 1) << 9;
mask2 |= ((d2 & 2) >> 1) << 9;
d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 3;
mask2 |= ((d1 & 2) >> 1) << 3;
mask1 |= (d2 & 1) << 11;
mask2 |= ((d2 & 2) >> 1) << 11;
d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 5;
mask2 |= ((d1 & 2) >> 1) << 5;
mask1 |= (d2 & 1) << 13;
mask2 |= ((d2 & 2) >> 1) << 13;
d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th);
mask1 |= (d1 & 1) << 7;
mask2 |= ((d1 & 2) >> 1) << 7;
mask1 |= (d2 & 1) << 15;
mask2 |= ((d2 & 2) >> 1) << 15;
}
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> not a keypoint
__device__ __forceinline__ bool isKeyPoint(int mask1, int mask2)
{
return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) ||
(__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7))));
}
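// Explanatory note (wording not from the original source): bit i of mask1 / mask2 marks circle
// pixel i as brighter / darker than the centre by more than the threshold. __popc(mask) > 8
// requires at least 9 set bits, so mask >= 0x1ff and (mask >> 3) >= 63, which is why the lookup
// subtracts 63: c_table is a bitset over the possible 16-bit masks (byte index mask >> 3, bit
// index mask & 7) that flags masks containing a sufficiently long contiguous arc of set bits
// (the FAST contiguity test).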
__device__ int cornerScore(const uint32_t C[4], const int v, const int threshold)
{
// binary search in [threshold + 1, 255]
int min = threshold + 1;
int max = 255;
while (min <= max)
{
const int mid = (min + max) >> 1;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, mid, mask1, mask2);
int isKp = static_cast<int>(isKeyPoint(mask1, mask2));
min = isKp * (mid + 1) + (isKp ^ 1) * min;
max = (isKp ^ 1) * (mid - 1) + isKp * max;
}
return min - 1;
}
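// Note (explanatory, not from the original source): assuming the keypoint test is monotone in the
// threshold (raising th can only turn a keypoint into a non-keypoint), the loop above finds the
// largest th in [threshold + 1, 255] for which isKeyPoint still holds and returns it as the corner
// score; e.g. if the pixel survives up to th = 40 the search ends with min = 41 and 40 is returned.
// If the pixel already fails at threshold + 1, the score is just `threshold`.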
__device__ int isKeyPoint2(Saiga::ImageView<unsigned char> img, const int i, const int j, const int threshold)
{
int v;
uint32_t C[4] = {0, 0, 0, 0};
C[2] |= static_cast<uint32_t>(img(i - 3, j - 1)) << 8;
C[2] |= static_cast<uint32_t>(img(i - 3, j));
C[1] |= static_cast<uint32_t>(img(i - 3, j + 1)) << (3 * 8);
C[2] |= static_cast<uint32_t>(img(i - 2, j - 2)) << (2 * 8);
C[1] |= static_cast<uint32_t>(img(i - 2, j + 2)) << (2 * 8);
C[2] |= static_cast<uint32_t>(img(i - 1, j - 3)) << (3 * 8);
C[1] |= static_cast<uint32_t>(img(i - 1, j + 3)) << 8;
C[3] |= static_cast<uint32_t>(img(i, j - 3));
v = static_cast<int>(img(i, j));
C[1] |= static_cast<uint32_t>(img(i, j + 3));
int d1 = diffType(v, C[1] & 0xff, threshold);
int d2 = diffType(v, C[3] & 0xff, threshold);
if ((d1 | d2) == 0)
{
return 0;
}
C[3] |= static_cast<uint32_t>(img(i + 1, j - 3)) << 8;
C[0] |= static_cast<uint32_t>(img(i + 1, j + 3)) << (3 * 8);
C[3] |= static_cast<uint32_t>(img(i + 2, j - 2)) << (2 * 8);
C[0] |= static_cast<uint32_t>(img(i + 2, j + 2)) << (2 * 8);
C[3] |= static_cast<uint32_t>(img(i + 3, j - 1)) << (3 * 8);
C[0] |= static_cast<uint32_t>(img(i + 3, j));
C[0] |= static_cast<uint32_t>(img(i + 3, j + 1)) << 8;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, threshold, mask1, mask2);
if (isKeyPoint(mask1, mask2))
{
return cornerScore(C, v, threshold);
}
return 0;
}
__device__ bool isMax(int2 loc, Saiga::ImageView<int> scoreMat)
{
int score = scoreMat(loc.y, loc.x);
bool ismax = score > scoreMat(loc.y - 1, loc.x - 1) && score > scoreMat(loc.y - 1, loc.x) &&
score > scoreMat(loc.y - 1, loc.x + 1) &&
score > scoreMat(loc.y, loc.x - 1) && score > scoreMat(loc.y, loc.x + 1) &&
score > scoreMat(loc.y + 1, loc.x - 1) && score > scoreMat(loc.y + 1, loc.x) &&
score > scoreMat(loc.y + 1, loc.x + 1);
return ismax;
}
template <int TILE_SIZE_X, int TILE_SIZE_Y>
__global__ void tileCalcKeypoints_kernel(Saiga::ImageView<unsigned char> img_, short2* kpLoc, float* kpScore,
const unsigned int maxKeypoints, const int highThreshold,
const int lowThreshold, unsigned int* counter_ptr)
{
int max_kps_high = 50;
int max_kps_low = 50;
const int required_border = 4;
const int local_image_w = TILE_SIZE_X + 2 * required_border;
const int local_image_h = TILE_SIZE_Y + 2 * required_border;
static_assert(local_image_w % 4 == 0, "local_image_w must be a multiple of 4 for the 4-byte wide shared-memory loads");
static_assert(local_image_h % 4 == 0, "local_image_h must be a multiple of 4");
CUDA_ASSERT(img_.pitchBytes % 4 == 0);
__shared__ int local_image_i[local_image_h][local_image_w / 4];
__shared__ int local_score[local_image_h][local_image_w];
__shared__ unsigned int num_kps;
const int2 global_inner_start = {int(blockIdx.x * blockDim.x), int((blockIdx.y * blockDim.y) * 4)};
const int2 global_outer_start = {global_inner_start.x - 4, global_inner_start.y - 4};
const int block_start_x = blockIdx.x * blockDim.x;
const int block_start_y = (blockIdx.y * blockDim.y) * 4;
const int linear_local_tid = threadIdx.y * blockDim.x + threadIdx.x;
for (int t = linear_local_tid; t < (local_image_w / 4) * local_image_h; t += blockDim.x * blockDim.y)
{
int local_x = t % (local_image_w / 4);
int local_y = t / (local_image_w / 4);
int x = global_outer_start.x + local_x * 4;
int y = global_outer_start.y + local_y;
CUDA_ASSERT(x % 4 == 0);
// clamp to border is better than conditional reads
x = max(0, min(x, (int)img_.pitchBytes - 4));
y = max(0, min(y, img_.rows - 1));
CUDA_ASSERT(x % 4 == 0);
reinterpret_cast<int*>(&local_image_i[local_y][local_x])[0] = reinterpret_cast<const int*>(&img_(y, x))[0];
}
__syncthreads();
Saiga::ImageView<unsigned char> img;
img.w = local_image_w;
img.h = local_image_h;
img.pitchBytes = local_image_w;
img.dataT = reinterpret_cast<unsigned char*>(&local_image_i[0][0]);
Saiga::ImageView<int> scoreMat;
scoreMat.w = local_image_w;
scoreMat.h = local_image_h;
scoreMat.pitchBytes = local_image_w * 4;
scoreMat.dataT = reinterpret_cast<int*>(&local_score[0][0]);
if (linear_local_tid == 0)
{
num_kps = 0;
}
// compute score
for (int t = linear_local_tid; t < (TILE_SIZE_X + 2) * (TILE_SIZE_Y + 2); t += blockDim.x * blockDim.y)
{
int local_x = t % (TILE_SIZE_X + 2);
int local_y = t / (TILE_SIZE_X + 2);
int x = local_x + 3;
int y = local_y + 3;
scoreMat(y, x) = isKeyPoint2(img, y, x, highThreshold);
}
__syncthreads();
for (int t = 0; t < 4; ++t)
{
int inner_x = threadIdx.x;
int inner_y = threadIdx.y + t * 8;
int x = inner_x + 4;
int y = inner_y + 4;
int global_x = inner_x + global_inner_start.x;
int global_y = inner_y + global_inner_start.y;
int score = scoreMat(y, x);
if (score == 0) continue;
if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4)
{
continue;
}
if (isMax(make_int2(x, y), scoreMat))
{
auto local_index = atomicInc(&num_kps, (unsigned int)(-1));
if (local_index < max_kps_high)
{
auto global_index = atomicInc(counter_ptr, (unsigned int)(-1));
if (global_index < maxKeypoints)
{
short2 loc;
loc.x = global_x;
loc.y = global_y;
kpLoc[global_index] = loc;
kpScore[global_index] = static_cast<float>(score);
}
}
}
}
__syncthreads();
if (num_kps > 0) return;
// compute score
for (int t = linear_local_tid; t < (TILE_SIZE_X + 2) * (TILE_SIZE_Y + 2); t += blockDim.x * blockDim.y)
{
int local_x = t % (TILE_SIZE_X + 2);
int local_y = t / (TILE_SIZE_X + 2);
int x = local_x + 3;
int y = local_y + 3;
bool in_bounds = (block_start_y + y < img_.rows - 3) && (block_start_x + x < img_.cols - 3);
scoreMat(y, x) = in_bounds * isKeyPoint2(img, y, x, lowThreshold);
}
__syncthreads();
for (int t = 0; t < 4; ++t)
{
int inner_x = threadIdx.x;
int inner_y = threadIdx.y + t * 8;
int x = inner_x + 4;
int y = inner_y + 4;
int global_x = inner_x + global_inner_start.x;
int global_y = inner_y + global_inner_start.y;
int score = scoreMat(y, x);
if (score == 0) continue;
if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4)
{
continue;
}
if (isMax(make_int2(x, y), scoreMat))
{
auto local_index = atomicInc(&num_kps, (unsigned int)(-1));
if (local_index < max_kps_low)
{
auto global_index = atomicInc(counter_ptr, (unsigned int)(-1));
if (global_index < maxKeypoints)
{
short2 loc;
loc.x = global_x;
loc.y = global_y;
kpLoc[global_index] = loc;
kpScore[global_index] = static_cast<float>(score);
}
}
}
}
}
__global__ void createKps(Saiga::ArrayView<Saiga::KeyPoint<float>> kps, short2* kpLoc, float* kpScore)
{
Saiga::CUDA::ThreadInfo<> ti;
int i = ti.thread_id;
if (i >= kps.size())
{
return;
}
kps[i] = Saiga::KeyPoint<float>(kpLoc[i].x, kpLoc[i].y, 7, -1, kpScore[i]);
}
Fast::Fast(int highThreshold, int lowThreshold, int maxKeypoints)
: highThreshold(highThreshold), lowThreshold(lowThreshold), maxKeypoints(maxKeypoints)
{
counter_keypoint_location.resize(maxKeypoints + 1);
keypoint_score.resize(maxKeypoints);
h_counter_keypoint_location.resize(maxKeypoints + 1);
h_keypoint_score.resize(maxKeypoints);
}
Fast::~Fast() {}
void Fast::Detect(Saiga::ImageView<unsigned char> d_image, cudaStream_t stream)
{
auto h_counter = (unsigned int*)h_counter_keypoint_location.data();
auto d_counter = (unsigned int*)counter_keypoint_location.data().get();
auto keypoint_location = counter_keypoint_location.data().get() + 1;
{
CHECK_CUDA_ERROR(cudaMemsetAsync(d_counter, 0, sizeof(unsigned int), stream));
dim3 dimBlock(32, 8);
dim3 dimGrid(Saiga::iDivUp(d_image.cols, 32), Saiga::iDivUp(d_image.rows, 32));
tileCalcKeypoints_kernel<32, 32><<<dimGrid, dimBlock, 0, stream>>>(d_image, keypoint_location,
keypoint_score.data().get(), maxKeypoints,
highThreshold, lowThreshold, d_counter);
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_counter_keypoint_location.data(), counter_keypoint_location.data().get(),
sizeof(short2) * (actual_max_keypoints + 1), cudaMemcpyDeviceToHost, stream));
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_keypoint_score.data(), keypoint_score.data().get(),
sizeof(float) * actual_max_keypoints, cudaMemcpyDeviceToHost, stream));
detection_finished.record(stream);
}
}
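// Launch geometry note (explanatory, not from the original source): the grid covers the image in
// 32x32 tiles and each 32x8 thread block processes one tile; every thread handles 4 rows via the
// `t` loop inside tileCalcKeypoints_kernel, so blockDim.y * 4 == TILE_SIZE_Y.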
int Fast::Download(Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints, cudaStream_t stream)
{
detection_finished.synchronize();
auto h_counter = (unsigned int*)h_counter_keypoint_location.data();
auto keypoint_location = counter_keypoint_location.data().get() + 1;
auto count = h_counter[0];
if (count > actual_max_keypoints)
{
auto remaining_points = count - actual_max_keypoints;
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_counter_keypoint_location.data() + actual_max_keypoints + 1,
keypoint_location + actual_max_keypoints, sizeof(short2) * remaining_points,
cudaMemcpyDeviceToHost, stream));
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_keypoint_score.data() + actual_max_keypoints,
keypoint_score.data().get() + actual_max_keypoints,
sizeof(float) * remaining_points, cudaMemcpyDeviceToHost, stream));
actual_max_keypoints = count * 1.05;
CHECK_CUDA_ERROR(cudaStreamSynchronize(stream));
}
SAIGA_ASSERT(keypoints.size() >= count);
for (int i = 0; i < count; ++i)
{
Saiga::KeyPoint<float> kp(h_counter_keypoint_location[i + 1].x, h_counter_keypoint_location[i + 1].y, 0, -1,
h_keypoint_score[i]);
keypoints[i] = kp;
}
return count;
}
} // namespace CUDA
} // namespace Saiga
#endif |
33421dfaaf9493ee998302dffc73d9fab9539915.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//host_SparseMV
#include "test_comm.h"
// ---------------------------------------------------------------------
__host__ int smat_size( int count_nzmax, int cunt_rows ) {
return ( sizeof(t_ve) + sizeof(t_mindex) ) * count_nzmax
+ sizeof(t_mindex) * (cunt_rows + 1);
}
// ---------------------------------------------------------------------
__host__ void set_sparse_data( t_SparseMatrix* m, void* mv ) {
m->pCol = (t_mindex *) mv;
m->pNZElement = (t_ve *) (&m->pCol[m->nzmax] ) ;
m->pRow = (t_mindex *) (&m->pNZElement[m->nzmax]);
}
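// Memory layout sketch (explanatory, not from the original source): the single block handed to
// set_sparse_data() is laid out as
//   [ pCol : nzmax * sizeof(t_mindex) | pNZElement : nzmax * sizeof(t_ve) | pRow : (rows + 1) * sizeof(t_mindex) ]
// which is exactly the byte count returned by smat_size(nzmax, rows) above.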
void host_sparseMatrixMul(t_FullMatrix * pResultVector,t_SparseMatrix *pSparseMatrix, t_FullMatrix * pVector){
t_SparseMatrix host_SparseMatrix,dev_SparseMatrix;
t_FullMatrix host_Vector,dev_Vector,host_ResultVector,dev_ResultVector;
size_t size_NZElement,size_Row,size_Col;
hipError_t e;
int sizeBlock,i;
float t_avg;
t_avg = 0;
sizeBlock = VECTOR_BLOCK_SIZE;
//=====debug==================
//printf("=======in host========== \n");
//printf("pSparseMatrix->m=%d \n",pSparseMatrix->m);
//printf("pSparseMatrix->n=%d \n",pSparseMatrix->n);
//============================
// Create an input and output data array on the GPU
//malloc memory for Input Sparse-Matrix
printf("malloc sparse Matrix \n");
dev_SparseMatrix.m = pSparseMatrix->m;
dev_SparseMatrix.n = pSparseMatrix->n;
dev_SparseMatrix.nzmax = pSparseMatrix->nzmax;
int msize = smat_size( dev_SparseMatrix.nzmax, dev_SparseMatrix.n );
printf(" got result %u \n", msize);
void *devicemem;
e = hipMalloc ( &devicemem, msize );
CUDA_UTIL_ERRORCHECK("hipMalloc")
//pSparseMatrix->pCol is the beginning of the memory block
e = hipMemcpy( devicemem, pSparseMatrix->pCol, msize , hipMemcpyHostToDevice);
CUDA_UTIL_ERRORCHECK("hipMemcpy")
set_sparse_data( &dev_SparseMatrix, devicemem);
//malloc device memory for Input vector
//printf("malloc vector \n");
size_t size_VElement, size_RElement;
size_VElement = sizeof(t_ve)*pVector->m*pVector->n;
size_RElement = sizeof(t_ve)*pSparseMatrix->m;
hipMalloc( (void **) &(dev_Vector.pElement),size_VElement);
dev_Vector.m = pVector->m;//host_Vector.m;
dev_Vector.n = pVector->n;//host_Vector.n;
hipMemcpy(dev_Vector.pElement,pVector->pElement,size_VElement,hipMemcpyHostToDevice);
//printf("malloc output \n");
//malloc output Vector
dev_ResultVector.m = pSparseMatrix->m;
dev_ResultVector.n = 1;
hipMalloc( (void **) &(dev_ResultVector.pElement),size_RElement);
// Compute the execution configuration (the actual block and grid sizes are chosen below)
//dim3 dimGrid((sizeIn)/dimBlock.x);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,0);
printf("number of multiProcessors: %d \n",deviceProp.multiProcessorCount);
int gridSize = deviceProp.multiProcessorCount;
//printf("VECTOR_BLOCK_SIZE: %d \n",sizeBlock);
gridSize = 1024 ;
/*
//====for sparseMatrixMul_kernel04=======
dim3 dimBlock(sizeBlock);
if (gridSize > pSparseMatrix->m)gridSize = pSparseMatrix->m;
//========================
*/
//====for sparseMatrixMul_kernel05=======
int blockX = VECTOR_BLOCK_X; //32
int blockY = VECTOR_BLOCK_Y; //16
dim3 dimBlock(blockX,blockY);
if (gridSize*blockY > pSparseMatrix->m)gridSize = pSparseMatrix->m/blockY;
if ( (pSparseMatrix->m) % blockY !=0 ) gridSize+=1;
//================================
printf("grid size = %d\n",gridSize);
dim3 dimGrid(gridSize);
//if ( (sizeA) % sizeBlock !=0 ) dimGrid.x+=1;
printf("calling kernel \n");
START_CUDA_TIMER;
hipLaunchKernelGGL(( sparseMatrixMul), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_ResultVector,dev_SparseMatrix,dev_Vector);
e = hipGetLastError();
if ( e != hipSuccess)
{
fprintf(stderr, "CUDA Error on square_elements: '%s' \n", hipGetErrorString(e));
exit(-1);
}
STOP_CUDA_TIMER( &t_avg);
printf("GPU runing time =%lf (ms) \n",t_avg);
//printf("get Result \n");
//hipMemcpy( data_out_host->pElement,pResultVector->pElement, size_RElement, hipMemcpyDeviceToHost);
hipMemcpy( pResultVector->pElement,dev_ResultVector.pElement, size_RElement, hipMemcpyDeviceToHost);
pResultVector->m = pSparseMatrix->m;
pResultVector->n = 1;
//=========debug==============
//printf("==================Result in host============\n");
//for( i = 0; i < pResultVector->m; i++) printf("pResultVector->pElement[%d]=%f \n",i,pResultVector->pElement[i]);
//=======================
//printf("free host \n");
hipFree(devicemem);
hipFree(dev_Vector.pElement);
hipFree(dev_ResultVector.pElement);
}
| 33421dfaaf9493ee998302dffc73d9fab9539915.cu | //host_SparseMV
#include "test_comm.h"
// ---------------------------------------------------------------------
__host__ int smat_size( int count_nzmax, int cunt_rows ) {
return ( sizeof(t_ve) + sizeof(t_mindex) ) * count_nzmax
+ sizeof(t_mindex) * (cunt_rows + 1);
}
// ---------------------------------------------------------------------
__host__ void set_sparse_data( t_SparseMatrix* m, void* mv ) {
m->pCol = (t_mindex *) mv;
m->pNZElement = (t_ve *) (&m->pCol[m->nzmax] ) ;
m->pRow = (t_mindex *) (&m->pNZElement[m->nzmax]);
}
void host_sparseMatrixMul(t_FullMatrix * pResultVector,t_SparseMatrix *pSparseMatrix, t_FullMatrix * pVector){
t_SparseMatrix host_SparseMatrix,dev_SparseMatrix;
t_FullMatrix host_Vector,dev_Vector,host_ResultVector,dev_ResultVector;
size_t size_NZElement,size_Row,size_Col;
cudaError_t e;
int sizeBlock,i;
float t_avg;
t_avg = 0;
sizeBlock = VECTOR_BLOCK_SIZE;
//=====debug==================
//printf("=======in host========== \n");
//printf("pSparseMatrix->m=%d \n",pSparseMatrix->m);
//printf("pSparseMatrix->n=%d \n",pSparseMatrix->n);
//============================
// Create an input and output data array on the GPU
//malloc memory for Input Sparse-Matrix
printf("malloc sparse Matrix \n");
dev_SparseMatrix.m = pSparseMatrix->m;
dev_SparseMatrix.n = pSparseMatrix->n;
dev_SparseMatrix.nzmax = pSparseMatrix->nzmax;
int msize = smat_size( dev_SparseMatrix.nzmax, dev_SparseMatrix.n );
printf(" got result %u \n", msize);
void *devicemem;
e = cudaMalloc ( &devicemem, msize );
CUDA_UTIL_ERRORCHECK("cudaMalloc")
//pSparseMatrix->pCol is the beginning of the memory block
e = cudaMemcpy( devicemem, pSparseMatrix->pCol, msize , cudaMemcpyHostToDevice);
CUDA_UTIL_ERRORCHECK("cudaMemcpy")
set_sparse_data( &dev_SparseMatrix, devicemem);
//malloc device memory for Input vector
//printf("malloc vector \n");
size_t size_VElement, size_RElement;
size_VElement = sizeof(t_ve)*pVector->m*pVector->n;
size_RElement = sizeof(t_ve)*pSparseMatrix->m;
cudaMalloc( (void **) &(dev_Vector.pElement),size_VElement);
dev_Vector.m = pVector->m;//host_Vector.m;
dev_Vector.n = pVector->n;//host_Vector.n;
cudaMemcpy(dev_Vector.pElement,pVector->pElement,size_VElement,cudaMemcpyHostToDevice);
//printf("malloc output \n");
//malloc output Vector
dev_ResultVector.m = pSparseMatrix->m;
dev_ResultVector.n = 1;
cudaMalloc( (void **) &(dev_ResultVector.pElement),size_RElement);
// Compute the execution configuration (the actual block and grid sizes are chosen below)
//dim3 dimGrid((sizeIn)/dimBlock.x);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,0);
printf("number of multiProcessors: %d \n",deviceProp.multiProcessorCount);
int gridSize = deviceProp.multiProcessorCount;
//printf("VECTOR_BLOCK_SIZE: %d \n",sizeBlock);
gridSize = 1024 ;
/*
//====for sparseMatrixMul_kernel04=======
dim3 dimBlock(sizeBlock);
if (gridSize > pSparseMatrix->m)gridSize = pSparseMatrix->m;
//========================
*/
//====for sparseMatrixMul_kernel05=======
int blockX = VECTOR_BLOCK_X; //32
int blockY = VECTOR_BLOCK_Y; //16
dim3 dimBlock(blockX,blockY);
if (gridSize*blockY > pSparseMatrix->m)gridSize = pSparseMatrix->m/blockY;
if ( (pSparseMatrix->m) % blockY !=0 ) gridSize+=1;
//================================
printf("grid size = %d\n",gridSize);
dim3 dimGrid(gridSize);
//if ( (sizeA) % sizeBlock !=0 ) dimGrid.x+=1;
printf("calling kernel \n");
START_CUDA_TIMER;
sparseMatrixMul<<<dimGrid,dimBlock>>>(dev_ResultVector,dev_SparseMatrix,dev_Vector);
e = cudaGetLastError();
if ( e != cudaSuccess)
{
fprintf(stderr, "CUDA Error on square_elements: '%s' \n", cudaGetErrorString(e));
exit(-1);
}
STOP_CUDA_TIMER( &t_avg);
printf("GPU runing time =%lf (ms) \n",t_avg);
//printf("get Result \n");
//cudaMemcpy( data_out_host->pElement,pResultVector->pElement, size_RElement, cudaMemcpyDeviceToHost);
cudaMemcpy( pResultVector->pElement,dev_ResultVector.pElement, size_RElement, cudaMemcpyDeviceToHost);
pResultVector->m = pSparseMatrix->m;
pResultVector->n = 1;
//=========debug==============
//printf("==================Result in host============\n");
//for( i = 0; i < pResultVector->m; i++) printf("pResultVector->pElement[%d]=%f \n",i,pResultVector->pElement[i]);
//=======================
//printf("free host \n");
cudaFree(devicemem);
cudaFree(dev_Vector.pElement);
cudaFree(dev_ResultVector.pElement);
}
|
d0970ce039f69eb3c48c8f39421c588289b05934.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <float.h>
template <typename scalar_t>
__device__ void WarpReduce(
volatile scalar_t* min_dists,
volatile int64_t* min_idxs,
const size_t tid) {
// s = 32
if (min_dists[tid] > min_dists[tid + 32]) {
min_idxs[tid] = min_idxs[tid + 32];
min_dists[tid] = min_dists[tid + 32];
}
// s = 16
if (min_dists[tid] > min_dists[tid + 16]) {
min_idxs[tid] = min_idxs[tid + 16];
min_dists[tid] = min_dists[tid + 16];
}
// s = 8
if (min_dists[tid] > min_dists[tid + 8]) {
min_idxs[tid] = min_idxs[tid + 8];
min_dists[tid] = min_dists[tid + 8];
}
// s = 4
if (min_dists[tid] > min_dists[tid + 4]) {
min_idxs[tid] = min_idxs[tid + 4];
min_dists[tid] = min_dists[tid + 4];
}
// s = 2
if (min_dists[tid] > min_dists[tid + 2]) {
min_idxs[tid] = min_idxs[tid + 2];
min_dists[tid] = min_dists[tid + 2];
}
// s = 1
if (min_dists[tid] > min_dists[tid + 1]) {
min_idxs[tid] = min_idxs[tid + 1];
min_dists[tid] = min_dists[tid + 1];
}
}
// CUDA kernel to compute nearest neighbors between two batches of pointclouds
// where each point is of dimension D.
//
// Args:
// points1: First set of points, of shape (N, P1, D).
// points2: Second set of points, of shape (N, P2, D).
// idx: Output memory buffer of shape (N, P1).
// N: Batch size.
// P1: Number of points in points1.
// P2: Number of points in points2.
// D_2: Size of the shared buffer; this is D rounded up so that memory access
// is aligned.
//
template <typename scalar_t>
__global__ void NearestNeighborKernel(
const scalar_t* __restrict__ points1,
const scalar_t* __restrict__ points2,
int64_t* __restrict__ idx,
const size_t N,
const size_t P1,
const size_t P2,
const size_t D,
const size_t D_2) {
// Each block will compute one element of the output idx[n, i]. Within the
// block we will use threads to compute the distances between points1[n, i]
// and points2[n, j] for all 0 <= j < P2, then use a block reduction to
// take an argmin of the distances.
// Shared buffers for the threads in the block. CUDA only allows declaration
// of a single shared buffer, so it needs to be manually sliced and cast to
// build several logical shared buffers of different types.
extern __shared__ char shared_buf[];
scalar_t* x = (scalar_t*)shared_buf; // scalar_t[DD]
scalar_t* min_dists = &x[D_2]; // scalar_t[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t n = blockIdx.y; // index of batch element.
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x;
// Thread 0 copies points1[n, i, :] into x.
if (tid == 0) {
for (size_t d = 0; d < D; d++) {
x[d] = points1[n * (P1 * D) + i * D + d];
}
}
__syncthreads();
// Compute the distances between points1[n, i] and points2[n, j] for
// all 0 <= j < P2. Here each thread will reduce over P2 / blockDim.x
// in serial, and store its result to shared memory
scalar_t min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < P2; j += blockDim.x) {
scalar_t dist = 0;
for (size_t d = 0; d < D; d++) {
scalar_t x_d = x[d];
scalar_t y_d = points2[n * (P2 * D) + j * D + d];
scalar_t diff = x_d - y_d;
dist += diff * diff;
}
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? j : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<scalar_t>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx[n * P1 + i] = min_idxs[0];
}
}
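// Reduction structure note (explanatory, not from the original source): each block owns one output
// element idx[n, i]; threads stride over the P2 candidate points and keep their local best in
// shared memory, the tree reduction above halves the active range down to 64 entries, and
// WarpReduce then resolves offsets 32..1 inside a single warp without further __syncthreads().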
// CUDA kernel to compute nearest neighbors between two sets of 3-dimensional
// pointclouds. This is a specialization of the nearest_neighbor_kernel
// to the case D=3.
//
// Args:
// points1: First set of pointclouds, of shape (N, P1, 3).
// points2: Second set of pointclouds, of shape (N, P2, 3).
// idx: Output memory buffer of shape (N, P1).
// N: Batch size.
// P1: Number of points in points1.
// P2: Number of points in points2.
//
template <typename scalar_t>
__global__ void NearestNeighborKernelD3(
const scalar_t* __restrict__ points1,
const scalar_t* __restrict__ points2,
int64_t* __restrict__ idx,
const size_t N,
const size_t P1,
const size_t P2) {
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
scalar_t* min_dists = (scalar_t*)shared_buf; // scalar_t[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t D = 3;
const size_t n = blockIdx.y; // index of batch element.
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x;
// Retrieve the coordinates of points1[n, i] from global memory; these
// will be stored in registers for fast access.
const scalar_t x = points1[n * (P1 * D) + i * D + 0];
const scalar_t y = points1[n * (P1 * D) + i * D + 1];
const scalar_t z = points1[n * (P1 * D) + i * D + 2];
// Compute distances between points1[n, i] and all points2[n, j]
// for 0 <= j < P2
scalar_t min_dist = FLT_MAX;
size_t min_idx = 0;
// Distance computation for points in p2 spread across threads in the block.
for (size_t j = tid; j < P2; j += blockDim.x) {
scalar_t dx = x - points2[n * (P2 * D) + j * D + 0];
scalar_t dy = y - points2[n * (P2 * D) + j * D + 1];
scalar_t dz = z - points2[n * (P2 * D) + j * D + 2];
scalar_t dist = dx * dx + dy * dy + dz * dz;
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? j : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
// Synchronize local threads writing to the shared memory buffer.
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
// Synchronize local threads so that min_dists is correct.
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<scalar_t>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx[n * P1 + i] = min_idxs[0];
}
}
at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2) {
const auto N = p1.size(0);
const auto P1 = p1.size(1);
const auto P2 = p2.size(1);
const auto D = p1.size(2);
AT_ASSERTM(p2.size(2) == D, "Point sets must have same last dimension.");
auto idx = at::empty({N, P1}, p1.options().dtype(at::kLong));
// On P100 with pointclouds of size (16, 5000, 3), 128 threads per block
// gives best results.
const int threads = 128;
const dim3 blocks(P1, N);
if (D == 3) {
// Use the specialized kernel for D=3.
AT_DISPATCH_FLOATING_TYPES(
p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
size_t shared_size =
threads * sizeof(size_t) + threads * sizeof(int64_t);
hipLaunchKernelGGL(( NearestNeighborKernelD3<scalar_t>), dim3(blocks), dim3(threads), shared_size, 0,
p1.data_ptr<scalar_t>(),
p2.data_ptr<scalar_t>(),
idx.data_ptr<int64_t>(),
N,
P1,
P2);
}));
} else {
// Use the general kernel for all other D.
AT_DISPATCH_FLOATING_TYPES(
p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
// To avoid misaligned memory access, the size of shared buffers
// need to be rounded to the next even size.
size_t D_2 = D + (D % 2);
size_t shared_size = (D_2 + threads) * sizeof(size_t);
shared_size += threads * sizeof(int64_t);
hipLaunchKernelGGL(( NearestNeighborKernel<scalar_t>), dim3(blocks), dim3(threads), shared_size, 0,
p1.data_ptr<scalar_t>(),
p2.data_ptr<scalar_t>(),
idx.data_ptr<int64_t>(),
N,
P1,
P2,
D,
D_2);
}));
}
return idx;
}
| d0970ce039f69eb3c48c8f39421c588289b05934.cu | // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#include <ATen/ATen.h>
#include <float.h>
template <typename scalar_t>
__device__ void WarpReduce(
volatile scalar_t* min_dists,
volatile int64_t* min_idxs,
const size_t tid) {
// s = 32
if (min_dists[tid] > min_dists[tid + 32]) {
min_idxs[tid] = min_idxs[tid + 32];
min_dists[tid] = min_dists[tid + 32];
}
// s = 16
if (min_dists[tid] > min_dists[tid + 16]) {
min_idxs[tid] = min_idxs[tid + 16];
min_dists[tid] = min_dists[tid + 16];
}
// s = 8
if (min_dists[tid] > min_dists[tid + 8]) {
min_idxs[tid] = min_idxs[tid + 8];
min_dists[tid] = min_dists[tid + 8];
}
// s = 4
if (min_dists[tid] > min_dists[tid + 4]) {
min_idxs[tid] = min_idxs[tid + 4];
min_dists[tid] = min_dists[tid + 4];
}
// s = 2
if (min_dists[tid] > min_dists[tid + 2]) {
min_idxs[tid] = min_idxs[tid + 2];
min_dists[tid] = min_dists[tid + 2];
}
// s = 1
if (min_dists[tid] > min_dists[tid + 1]) {
min_idxs[tid] = min_idxs[tid + 1];
min_dists[tid] = min_dists[tid + 1];
}
}
// CUDA kernel to compute nearest neighbors between two batches of pointclouds
// where each point is of dimension D.
//
// Args:
// points1: First set of points, of shape (N, P1, D).
// points2: Second set of points, of shape (N, P2, D).
// idx: Output memory buffer of shape (N, P1).
// N: Batch size.
// P1: Number of points in points1.
// P2: Number of points in points2.
// D_2: Size of the shared buffer; this is D rounded up so that memory access
// is aligned.
//
template <typename scalar_t>
__global__ void NearestNeighborKernel(
const scalar_t* __restrict__ points1,
const scalar_t* __restrict__ points2,
int64_t* __restrict__ idx,
const size_t N,
const size_t P1,
const size_t P2,
const size_t D,
const size_t D_2) {
// Each block will compute one element of the output idx[n, i]. Within the
// block we will use threads to compute the distances between points1[n, i]
// and points2[n, j] for all 0 <= j < P2, then use a block reduction to
// take an argmin of the distances.
// Shared buffers for the threads in the block. CUDA only allows declaration
// of a single shared buffer, so it needs to be manually sliced and cast to
// build several logical shared buffers of different types.
extern __shared__ char shared_buf[];
scalar_t* x = (scalar_t*)shared_buf; // scalar_t[DD]
scalar_t* min_dists = &x[D_2]; // scalar_t[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t n = blockIdx.y; // index of batch element.
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x;
// Thread 0 copies points1[n, i, :] into x.
if (tid == 0) {
for (size_t d = 0; d < D; d++) {
x[d] = points1[n * (P1 * D) + i * D + d];
}
}
__syncthreads();
// Compute the distances between points1[n, i] and points2[n, j] for
// all 0 <= j < P2. Here each thread will reduce over P2 / blockDim.x
// in serial, and store its result to shared memory
scalar_t min_dist = FLT_MAX;
size_t min_idx = 0;
for (size_t j = tid; j < P2; j += blockDim.x) {
scalar_t dist = 0;
for (size_t d = 0; d < D; d++) {
scalar_t x_d = x[d];
scalar_t y_d = points2[n * (P2 * D) + j * D + d];
scalar_t diff = x_d - y_d;
dist += diff * diff;
}
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? j : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<scalar_t>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx[n * P1 + i] = min_idxs[0];
}
}
// CUDA kernel to compute nearest neighbors between two sets of 3-dimensional
// pointclouds. This is a specialization of the nearest_neighbor_kernel
// to the case D=3.
//
// Args:
// points1: First set of pointclouds, of shape (N, P1, 3).
// points2: Second set of pointclouds, of shape (N, P2, 3).
// idx: Output memory buffer of shape (N, P1).
// N: Batch size.
// P1: Number of points in points1.
// P2: Number of points in points2.
//
template <typename scalar_t>
__global__ void NearestNeighborKernelD3(
const scalar_t* __restrict__ points1,
const scalar_t* __restrict__ points2,
int64_t* __restrict__ idx,
const size_t N,
const size_t P1,
const size_t P2) {
// Single shared memory buffer which is split and cast to different types.
extern __shared__ char shared_buf[];
scalar_t* min_dists = (scalar_t*)shared_buf; // scalar_t[NUM_THREADS]
int64_t* min_idxs = (int64_t*)&min_dists[blockDim.x]; // int64_t[NUM_THREADS]
const size_t D = 3;
const size_t n = blockIdx.y; // index of batch element.
const size_t i = blockIdx.x; // index of point within batch element.
const size_t tid = threadIdx.x;
// Retrieve the coordinates of points1[n, i] from global memory; these
// will be stored in registers for fast access.
const scalar_t x = points1[n * (P1 * D) + i * D + 0];
const scalar_t y = points1[n * (P1 * D) + i * D + 1];
const scalar_t z = points1[n * (P1 * D) + i * D + 2];
// Compute distances between points1[n, i] and all points2[n, j]
// for 0 <= j < P2
scalar_t min_dist = FLT_MAX;
size_t min_idx = 0;
// Distance computation for points in p2 spread across threads in the block.
for (size_t j = tid; j < P2; j += blockDim.x) {
scalar_t dx = x - points2[n * (P2 * D) + j * D + 0];
scalar_t dy = y - points2[n * (P2 * D) + j * D + 1];
scalar_t dz = z - points2[n * (P2 * D) + j * D + 2];
scalar_t dist = dx * dx + dy * dy + dz * dz;
min_dist = (j == tid) ? dist : min_dist;
min_idx = (dist <= min_dist) ? j : min_idx;
min_dist = (dist <= min_dist) ? dist : min_dist;
}
min_dists[tid] = min_dist;
min_idxs[tid] = min_idx;
// Synchronize local threads writing to the shared memory buffer.
__syncthreads();
// Perform reduction in shared memory.
for (int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
if (min_dists[tid] > min_dists[tid + s]) {
min_dists[tid] = min_dists[tid + s];
min_idxs[tid] = min_idxs[tid + s];
}
}
// Synchronize local threads so that min_dists is correct.
__syncthreads();
}
// Unroll the last 6 iterations of the loop since they will happen
// synchronized within a single warp.
if (tid < 32)
WarpReduce<scalar_t>(min_dists, min_idxs, tid);
// Finally thread 0 writes the result to the output buffer.
if (tid == 0) {
idx[n * P1 + i] = min_idxs[0];
}
}
at::Tensor NearestNeighborIdxCuda(at::Tensor p1, at::Tensor p2) {
const auto N = p1.size(0);
const auto P1 = p1.size(1);
const auto P2 = p2.size(1);
const auto D = p1.size(2);
AT_ASSERTM(p2.size(2) == D, "Point sets must have same last dimension.");
auto idx = at::empty({N, P1}, p1.options().dtype(at::kLong));
// On P100 with pointclouds of size (16, 5000, 3), 128 threads per block
// gives best results.
const int threads = 128;
const dim3 blocks(P1, N);
if (D == 3) {
// Use the specialized kernel for D=3.
AT_DISPATCH_FLOATING_TYPES(
p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
size_t shared_size =
threads * sizeof(size_t) + threads * sizeof(int64_t);
NearestNeighborKernelD3<scalar_t><<<blocks, threads, shared_size>>>(
p1.data_ptr<scalar_t>(),
p2.data_ptr<scalar_t>(),
idx.data_ptr<int64_t>(),
N,
P1,
P2);
}));
} else {
// Use the general kernel for all other D.
AT_DISPATCH_FLOATING_TYPES(
p1.scalar_type(), "nearest_neighbor_v3_cuda", ([&] {
// To avoid misaligned memory access, the size of shared buffers
// need to be rounded to the next even size.
size_t D_2 = D + (D % 2);
size_t shared_size = (D_2 + threads) * sizeof(size_t);
shared_size += threads * sizeof(int64_t);
NearestNeighborKernel<scalar_t><<<blocks, threads, shared_size>>>(
p1.data_ptr<scalar_t>(),
p2.data_ptr<scalar_t>(),
idx.data_ptr<int64_t>(),
N,
P1,
P2,
D,
D_2);
}));
}
return idx;
}
|
8364d52a40b1c0c2fbdb0aec3bbab62bd955c681.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Label Equivalence with optimization introduced by Kalentev (OLE stands for Optimized Label Equivalence)
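// Sketch of the iteration implemented below (explanatory, not from the original source): Init gives
// every foreground pixel the label (linear index + 1); the host then loops { Scan: each labelled
// pixel inspects its 8 neighbours and, if a smaller label exists, writes it into the root slot
// labels[label - 1] and raises the `changes` flag; Analyze: every pixel follows its label chain to
// the root, flattening the equivalences } until a Scan pass reports no changes.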
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
// Init phase.
// Labels start at value 1, to differentiate them from background, that has value 0.
__global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = global_row * img.step + global_col;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
if (global_row < img.rows && global_col < img.cols) {
labels[labels_index] = img[img_index] ? (labels_index + 1) : 0;
}
}
__device__ unsigned int MinLabel(unsigned l1, unsigned l2) {
if (l1 && l2)
return min(l1, l2);
else
return l1;
}
__device__ unsigned int FindMinLabel(cuda::PtrStepSzi labels, unsigned row, unsigned col, unsigned label, unsigned labels_index) {
unsigned int min = label;
if (row > 0) {
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size)]);
if (col > 0)
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) + 1]);
}
if (row < labels.rows - 1) {
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size)]);
if (col > 0)
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) + 1]);
}
if (col > 0)
min = MinLabel(min, labels.data[labels_index - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index + 1]);
return min;
}
// Scan phase.
// The pixel associated with current thread is given the minimum label of the neighbours.
__global__ void Scan(cuda::PtrStepSzi labels, char *changes) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned label = labels[labels_index];
if (label) {
unsigned min_label = FindMinLabel(labels, row, col, label, labels_index);
if (min_label < label) {
labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label);
*changes = 1;
}
}
}
}
// Analysis phase.
// The pixel associated with current thread is given the minimum label of the neighbours.
__global__ void Analyze(cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned label = labels[labels_index];
if (label) {
unsigned index = labels_index;
while (label - 1 != index) {
index = label - 1;
label = labels[index];
}
labels[labels_index] = label;
}
}
}
}
class OLE : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
char changes;
char *d_changes;
public:
OLE() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
hipMalloc(&d_changes, sizeof(char));
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
while (true) {
changes = 0;
hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes);
hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
hipFree(d_changes);
hipDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
hipMalloc(&d_changes, sizeof(char));
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
hipFree(d_changes);
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
while (true) {
changes = 0;
hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes);
hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(OLE);
| 8364d52a40b1c0c2fbdb0aec3bbab62bd955c681.cu | #include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// Label Equivalence with optimization introduced by Kalentev (OLE stands for Optimized Label Equivalence)
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
// Init phase.
// Labels start at value 1, to differentiate them from background, that has value 0.
__global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned img_index = global_row * img.step + global_col;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
if (global_row < img.rows && global_col < img.cols) {
labels[labels_index] = img[img_index] ? (labels_index + 1) : 0;
}
}
__device__ unsigned int MinLabel(unsigned l1, unsigned l2) {
if (l1 && l2)
return min(l1, l2);
else
return l1;
}
__device__ unsigned int FindMinLabel(cuda::PtrStepSzi labels, unsigned row, unsigned col, unsigned label, unsigned labels_index) {
unsigned int min = label;
if (row > 0) {
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size)]);
if (col > 0)
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) + 1]);
}
if (row < labels.rows - 1) {
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size)]);
if (col > 0)
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) + 1]);
}
if (col > 0)
min = MinLabel(min, labels.data[labels_index - 1]);
if (col < labels.cols - 1)
min = MinLabel(min, labels.data[labels_index + 1]);
return min;
}
// Scan phase.
// The pixel associated with current thread is given the minimum label of the neighbours.
__global__ void Scan(cuda::PtrStepSzi labels, char *changes) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned label = labels[labels_index];
if (label) {
unsigned min_label = FindMinLabel(labels, row, col, label, labels_index);
if (min_label < label) {
labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label);
*changes = 1;
}
}
}
}
// Analysis phase.
// The pixel associated with current thread is given the minimum label of the neighbours.
__global__ void Analyze(cuda::PtrStepSzi labels) {
unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = row * (labels.step / labels.elem_size) + col;
if (row < labels.rows && col < labels.cols) {
unsigned label = labels[labels_index];
if (label) {
unsigned index = labels_index;
while (label - 1 != index) {
index = label - 1;
label = labels[index];
}
labels[labels_index] = label;
}
}
}
}
class OLE : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
char changes;
char *d_changes;
public:
OLE() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
cudaMalloc(&d_changes, sizeof(char));
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
while (true) {
changes = 0;
cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes);
cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
cudaFree(d_changes);
cudaDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
cudaMalloc(&d_changes, sizeof(char));
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
cudaFree(d_changes);
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
Init << <grid_size_, block_size_ >> >(d_img_, d_img_labels_);
while (true) {
changes = 0;
cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_img_labels_, d_changes);
cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_img_labels_);
}
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(OLE);
|
37dd4b353c529f4294341a8188821e35bed7d798.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "multilogit.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float tmpScore = 0;
float classApprox[ElementsPerThread];
float expClassApprox[ElementsPerThread];
ui8 targetClass[ElementsPerThread];
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
ui32 loadApproxIndex[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadApproxIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize)) : 0;
}
const float tmp = targetClass[j] < effectiveClassCount && idx < size ? __ldg(predictions + loadApproxIndex[j] + targetClass[j] * predictionsAlignSize) : 0.0f;
classApprox[j] = tmp - maxApprox[j];
expClassApprox[j] = __expf(classApprox[j]);
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) : 0.0f;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (der && idx < size) {
for (int k = 0; k < effectiveClassCount; ++k) {
const float pk = __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der[idx + k * derAlignSize] = weight[j] * ((targetClass[j] == k ? 1.0f : 0.0f) - pk);
}
}
if (functionValue) {
const float logDenum = __logf(sumExpApproxForAllClasses[j]);
tmpScore += (idx < size) ? weight[j] * (classApprox[j] - logDenum) : 0;
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitSecondDerRowImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
int der2Row,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
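// Fills one row (der2Row) of the per-sample Hessian of the multinomial logit loss:
// the diagonal term is w * p_r * (1 - p_r) and the terms below it are -w * p_k * p_r,
// with p_* the softmax probabilities (last class again the implicit reference).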
float tmpScore = 0;
ui8 targetClass[ElementsPerThread];
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + idx + k * predictionsAlignSize)) : 0;
}
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) : 0;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const int lastRowToWrite = der2Row;
if (idx < size) {
float pRow = 0;
if (der2Row < effectiveClassCount) {
pRow = __expf(__ldg(predictions + idx + der2Row * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
} else {
pRow = __expf(-maxApprox[j]) / sumExpApproxForAllClasses[j];
}
for (int k = 0; k < der2Row; ++k) {
const float pk = __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der2[idx + k * der2AlignSize] = -weight[j] * pk * pRow;
}
der2[idx + der2Row * der2AlignSize] = weight[j] * (1.0 - pRow) * pRow;
}
}
}
void MultiLogitValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
hipLaunchKernelGGL(( MultiLogitValAndFirstDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiLogitSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
hipLaunchKernelGGL(( MultiLogitSecondDerRowImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2Row, der2AlignSize, der2);
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float tmpScore = 0;
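// One-vs-all parameterisation: each class has an independent sigmoid p = e^a / (1 + e^a).
// The per-class first derivative is w * (c - p) with c = 1 only for the target class,
// and the function value accumulates the binary log-likelihoods divided by numClasses.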
ui8 targetClass[ElementsPerThread];
float weight[ElementsPerThread];
ui32 loadPredictionIndex[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadPredictionIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + loadPredictionIndex[j] + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
const float c = clazz == targetClass[j] ? 1.0f : 0.0f;
const float direction = c - p;
if (der && idx < size) {
der[idx + clazz * derAlignSize] = weight[j] * direction;
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1 + expVal) : val;
tmpScore += (idx < size) ? weight[j] * (c * val - logExpValPlusOne) / numClasses : 0;
}
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllSecondDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
ui8 targetClass[ElementsPerThread];
float weight[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + idx + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
const float c = clazz == targetClass[j] ? 1.0f : 0.0f;
if (der2 && idx < size) {
der2[idx + clazz * der2AlignSize] = weight[j] * p * (1.0f - p);
}
}
}
}
void MultiClassOneVsAllValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
hipLaunchKernelGGL(( MultiClassOneVsAllValAndFirstDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiClassOneVsAllSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
hipLaunchKernelGGL(( MultiClassOneVsAllSecondDerImpl < blockSize, elementsPerThreads >), dim3(numBlocks), dim3(blockSize), 0, stream, targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2AlignSize, der2);
}
}
__global__ void BuildConfusionMatrixBinsImpl(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, ui32 predictionsDim,
ui64 predictionsAlignSize,
ui32* bins) {
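// For each sample, take the argmax over classes (classes beyond predictionsDim get an
// implicit approx of 0) and encode the (predicted, true) pair as a single bin index
// bestClass * numClasses + targetClass.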
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
const ui32 targetClass = static_cast<ui8>(__ldg(targetClasses + i));
float bestApprox = NegativeInfty();
int bestClass = -1;
predictions += i;
for (int clazz = 0; clazz < numClasses; ++clazz) {
const float approx = clazz < predictionsDim ? __ldg(predictions + clazz * predictionsAlignSize) : 0.0f;
if (approx > bestApprox) {
bestApprox = approx;
bestClass = clazz;
}
}
bins[i] = bestClass * numClasses + targetClass;
}
}
void BuildConfusionMatrixBins(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, int predictionsDim, ui32 predictionsAlignSize,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
if (numBlocks) {
BuildConfusionMatrixBinsImpl << < numBlocks, blockSize, 0, stream >> >(targetClasses, numClasses, size, predictions, predictionsDim, predictionsAlignSize, bins);
}
}
}
| 37dd4b353c529f4294341a8188821e35bed7d798.cu | #include "multilogit.cuh"
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float tmpScore = 0;
float classApprox[ElementsPerThread];
float expClassApprox[ElementsPerThread];
ui8 targetClass[ElementsPerThread];
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
ui32 loadApproxIndex[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadApproxIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize)) : 0;
}
const float tmp = targetClass[j] < effectiveClassCount && idx < size ? __ldg(predictions + loadApproxIndex[j] + targetClass[j] * predictionsAlignSize) : 0.0f;
classApprox[j] = tmp - maxApprox[j];
expClassApprox[j] = __expf(classApprox[j]);
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) : 0.0f;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
if (der && idx < size) {
for (int k = 0; k < effectiveClassCount; ++k) {
const float pk = __expf(__ldg(predictions + loadApproxIndex[j] + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der[idx + k * derAlignSize] = weight[j] * ((targetClass[j] == k ? 1.0f : 0.0f) - pk);
}
}
if (functionValue) {
const float logDenum = __logf(sumExpApproxForAllClasses[j]);
tmpScore += (idx < size) ? weight[j] * (classApprox[j] - logDenum) : 0;
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiLogitSecondDerRowImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
int der2Row,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
const int effectiveClassCount = numClasses - 1;
float tmpScore = 0;
ui8 targetClass[ElementsPerThread];
float sumExpApproxForAllClasses[ElementsPerThread];
float weight[ElementsPerThread];
float maxApprox[ElementsPerThread];
{
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
maxApprox[j] = 0;
for (int k = 0; k < effectiveClassCount; ++k) {
maxApprox[j] = idx < size ? max(maxApprox[j], __ldg(predictions + idx + k * predictionsAlignSize)) : 0;
}
sumExpApproxForAllClasses[j] = 0.0f;
for (int k = 0; k < effectiveClassCount; ++k) {
sumExpApproxForAllClasses[j] += idx < size ? __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) : 0;
}
sumExpApproxForAllClasses[j] += __expf(0.0f - maxApprox[j]);
}
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const int lastRowToWrite = der2Row;
if (idx < size) {
float pRow = 0;
if (der2Row < effectiveClassCount) {
pRow = __expf(__ldg(predictions + idx + der2Row * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
} else {
pRow = __expf(-maxApprox[j]) / sumExpApproxForAllClasses[j];
}
for (int k = 0; k < der2Row; ++k) {
const float pk = __expf(__ldg(predictions + idx + k * predictionsAlignSize) - maxApprox[j]) / sumExpApproxForAllClasses[j];
der2[idx + k * der2AlignSize] = -weight[j] * pk * pRow;
}
der2[idx + der2Row * der2AlignSize] = weight[j] * (1.0 - pRow) * pRow;
}
}
}
void MultiLogitValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
MultiLogitValAndFirstDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiLogitSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
int der2Row, ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
MultiLogitSecondDerRowImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2Row, der2AlignSize, der2);
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllValAndFirstDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
const ui32* loadPredictionsIndices,
ui64 predictionsAlignSize,
float* functionValue,
float* der,
ui64 derAlignSize) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
float tmpScore = 0;
ui8 targetClass[ElementsPerThread];
float weight[ElementsPerThread];
ui32 loadPredictionIndex[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
loadPredictionIndex[j] = loadPredictionsIndices && idx < size ? __ldg(loadPredictionsIndices + idx) : idx;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + loadPredictionIndex[j] + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
const float c = clazz == targetClass[j] ? 1.0f : 0.0f;
const float direction = c - p;
if (der && idx < size) {
der[idx + clazz * derAlignSize] = weight[j] * direction;
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1 + expVal) : val;
tmpScore += (idx < size) ? weight[j] * (c * val - logExpValPlusOne) / numClasses : 0;
}
}
}
if (functionValue) {
__shared__ float tmpScores[BlockSize];
tmpScores[threadIdx.x] = tmpScore;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BlockSize);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
template <int BlockSize, int ElementsPerThread>
__launch_bounds__(BlockSize, 2048 / BlockSize)
__global__ void MultiClassOneVsAllSecondDerImpl(const float* targetClasses, int numClasses, ui32 size,
const float* weights,
const float* predictions,
ui64 predictionsAlignSize,
ui64 der2AlignSize,
float* der2) {
ui32 tid = blockIdx.x * BlockSize * ElementsPerThread + threadIdx.x;
ui8 targetClass[ElementsPerThread];
float weight[ElementsPerThread];
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
targetClass[j] = idx < size ? static_cast<ui8>(__ldg(targetClasses + idx)) : 0;
weight[j] = (weights && (idx < size)) ? weights[idx] : 1.0f;
}
for (int clazz = 0; clazz < numClasses; ++clazz) {
#pragma unroll
for (int j = 0; j < ElementsPerThread; ++j) {
const int idx = tid + j * BlockSize;
const float val = idx < size ? __ldg(predictions + idx + clazz * predictionsAlignSize) : 0.0f;
const float expVal = __expf(val);
const float p = ClipProb(expVal / (1.0f + expVal));
const float c = clazz == targetClass[j] ? 1.0f : 0.0f;
if (der2 && idx < size) {
der2[idx + clazz * der2AlignSize] = weight[j] * p * (1.0f - p);
}
}
}
}
void MultiClassOneVsAllValueAndDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
const ui32* loadPredictionsIndices,
float* functionValue,
float* der, ui32 derAlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
//TODO: get rid of this
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (numBlocks) {
MultiClassOneVsAllValAndFirstDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, loadPredictionsIndices, predictionsAlignSize, functionValue, der, derAlignSize);
}
}
void MultiClassOneVsAllSecondDer(const float* targetClasses, int numClasses,
const float* targetWeights,
ui32 size,
const float* predictions, ui32 predictionsAlignSize,
float* der2,
ui32 der2AlignSize,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 elementsPerThreads = 2;
const ui32 numBlocks = CeilDivide<ui32>(size, elementsPerThreads * blockSize);
if (numBlocks) {
MultiClassOneVsAllSecondDerImpl < blockSize, elementsPerThreads ><<<numBlocks, blockSize, 0, stream>>>(targetClasses, numClasses, size, targetWeights, predictions, predictionsAlignSize, der2AlignSize, der2);
}
}
__global__ void BuildConfusionMatrixBinsImpl(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, ui32 predictionsDim,
ui64 predictionsAlignSize,
ui32* bins) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
const ui32 targetClass = static_cast<ui8>(__ldg(targetClasses + i));
float bestApprox = NegativeInfty();
int bestClass = -1;
predictions += i;
for (int clazz = 0; clazz < numClasses; ++clazz) {
const float approx = clazz < predictionsDim ? __ldg(predictions + clazz * predictionsAlignSize) : 0.0f;
if (approx > bestApprox) {
bestApprox = approx;
bestClass = clazz;
}
}
bins[i] = bestClass * numClasses + targetClass;
}
}
void BuildConfusionMatrixBins(const float* targetClasses, int numClasses, ui32 size,
const float* predictions, int predictionsDim, ui32 predictionsAlignSize,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
if (numBlocks) {
BuildConfusionMatrixBinsImpl << < numBlocks, blockSize, 0, stream >> >(targetClasses, numClasses, size, predictions, predictionsDim, predictionsAlignSize, bins);
}
}
}
|
1607d27299bfaeeccc47ca285d03e54c25261c4b.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
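// The shapes follow the usual CUTLASS (M, N, K) tiling convention; with OpClassSimt,
// Sm61 and an InstructionShape of <1, 1, 4> this instantiation presumably targets the
// dp4a int8 path. The values are emitted by the generator script, not tuned by hand here.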
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 1607d27299bfaeeccc47ca285d03e54c25261c4b.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
838749f8647839c8923f3c9638d7e4f9fa6bd677.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Thread block size
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0 // print the result by default; define as 1 on the command line to print timing
#endif
// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col) {
return A.elements[row * A.stride + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col, float value) {
A.elements[row * A.stride + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col) {
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
return Asub;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void print(Matrix X) {
for (int i = 0; i < X.height; i++) {
for (int j = 0; j < X.width; j++) {
std::cout << X.elements[i * X.width + j] << " ";
}
std::cout << "\n";
}
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc(&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
hipMalloc(&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
dim3 dimGrid((B.width - 1) / dimBlock.x + 1,
(A.height - 1) / dimBlock.y + 1);
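// Note: the grid is rounded up here, but MatMulKernel has no bounds checks and iterates
// over A.width / BLOCK_SIZE full tiles, so results are only correct when the matrix
// dimensions are multiples of BLOCK_SIZE (as stated above); the commented-out guard in
// the kernel would be needed for arbitrary sizes.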
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
hipEventRecord(stop);
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("4: Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("4: Async kernel error: %s\n", hipGetErrorString(errAsync));
// Read C from device memory
hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
if (ELAPSED_TIME == 1) {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
} else {
print(C);
}
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
int main() {
int n, m, q;
scanf("%d", &n);
m = q = n;
Matrix A;
Matrix B;
Matrix C;
// sizes are element counts (the byte size is recomputed inside MatMul)
int sizeA = n * m;
A.height = n;
A.width = m;
A.elements = new float[sizeA];
int sizeB = m * q;
B.height = m;
B.width = q;
B.elements = new float[sizeB];
int sizeC = n * q;
C.height = n;
C.width = q;
C.elements = new float[sizeC];
srand (time(NULL));
for(int i = 0; i < n*m; i++)
scanf("%f", &A.elements[i]);
for (int i = 0; i < m * q; i++)
scanf("%f", &B.elements[i]);
//print(A);
//printf("\n");
//print(B);
//printf("\n");
MatMul(A, B, C);
delete[] A.elements;
delete[] B.elements;
delete[] C.elements;
return 0;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
//if ((blockRow * blockDim.y + row < C.height) && (blockCol * blockDim.x + col < C.width)) {
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e) {
Cvalue += As[row][e] * Bs[e][col];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
//}
}
| 838749f8647839c8923f3c9638d7e4f9fa6bd677.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Thread block size
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0 // print the result by default; define as 1 on the command line to print timing
#endif
// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col) {
return A.elements[row * A.stride + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col, float value) {
A.elements[row * A.stride + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col) {
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
return Asub;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void print(Matrix X) {
for (int i = 0; i < X.height; i++) {
for (int j = 0; j < X.width; j++) {
std::cout << X.elements[i * X.width + j] << " ";
}
std::cout << "\n";
}
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
dim3 dimGrid((B.width - 1) / dimBlock.x + 1,
(A.height - 1) / dimBlock.y + 1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
cudaEventRecord(stop);
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("4: Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("4: Async kernel error: %s\n", cudaGetErrorString(errAsync));
// Read C from device memory
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
if (ELAPSED_TIME == 1) {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
} else {
print(C);
}
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
int main() {
int n, m, q;
scanf("%d", &n);
m = q = n;
Matrix A;
Matrix B;
Matrix C;
// sizes are element counts (the byte size is recomputed inside MatMul)
int sizeA = n * m;
A.height = n;
A.width = m;
A.elements = new float[sizeA];
int sizeB = m * q;
B.height = m;
B.width = q;
B.elements = new float[sizeB];
int sizeC = n * q;
C.height = n;
C.width = q;
C.elements = new float[sizeC];
srand (time(NULL));
for(int i = 0; i < n*m; i++)
scanf("%f", &A.elements[i]);
for (int i = 0; i < m * q; i++)
scanf("%f", &B.elements[i]);
//print(A);
//printf("\n");
//print(B);
//printf("\n");
MatMul(A, B, C);
delete[] A.elements;
delete[] B.elements;
delete[] C.elements;
return 0;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
//if ((blockRow * blockDim.y + row < C.height) && (blockCol * blockDim.x + col < C.width)) {
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e) {
Cvalue += As[row][e] * Bs[e][col];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
//}
}
|
a7ab27b056b7a4ba06e4e7e32360c7d9acf5e930.hip | // !!! This is a file automatically generated by hipify!!!
/* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
* Finaly the Output of RawToDigi data is given to pixelClusterizer
**/
// C++ includes
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
// CUDA includes
#include <hip/hip_runtime.h>
// CMSSW includes
#include "CUDACore/cudaCheck.h"
#include "CUDACore/device_unique_ptr.h"
#include "CUDACore/host_unique_ptr.h"
#include "CUDADataFormats/gpuClusteringConstants.h"
#include "CondFormats/SiPixelROCsStatusAndMapping.h"
#include "DataFormats/DetId.h"
#include "DataFormats/FEDNumbering.h"
#include "DataFormats/PixelSubdetector.h"
#include "DataFormats/SiPixelDigiConstants.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
#include "gpuCalibPixel.h"
#include "gpuClusterChargeCut.h"
#include "gpuClustering.h"
namespace pixelgpudetails {
// number of words for all the FEDs
constexpr uint32_t MAX_FED_WORDS = pixelgpudetails::MAX_FED * pixelgpudetails::MAX_WORD;
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender() {
word_ = cms::cuda::make_host_noncached_unique<unsigned int[]>(MAX_FED_WORDS, hipHostMallocWriteCombined);
fedId_ = cms::cuda::make_host_noncached_unique<unsigned char[]>(MAX_FED_WORDS, hipHostMallocWriteCombined);
}
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const uint32_t *src,
unsigned int length) {
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - FEDNumbering::MINSiPixeluTCAFEDID, length / 2);
}
////////////////////
__device__ bool isBarrel(uint32_t rawId) {
return (PixelSubdetector::PixelBarrel == ((rawId >> DetId::kSubdetOffset) & DetId::kSubdetMask));
}
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelROCsStatusAndMapping *cablingMap,
uint8_t fed,
uint32_t link,
uint32_t roc) {
uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
pixelgpudetails::DetIdGPU detId = {
cablingMap->rawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]};
return detId;
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to pixelgpudetails::global pixel
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
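// ROCs 0-7 and 8-15 occupy the two halves of a module and are read out in opposite
// directions; the branches below only select the affine map global = offset + slope * local
// for the given geometry (barrel/forward, +Z/-Z side, layer-1 special case).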
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
if (side == -1) { // panel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} else { // panel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
// inside frameConversion row: gRow, column: gCol
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
switch (status) {
case (1): {
if (debug)
printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
/// row and column in ROC representation
return ((rocRow < pixelgpudetails::numRowsInRoc) & (rocCol < pixelgpudetails::numColsInRoc));
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t checkROC(uint32_t errorWord,
uint8_t fedId,
uint32_t link,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint8_t errorType = (errorWord >> sipixelconstants::ROC_shift) & sipixelconstants::ERROR_mask;
if (errorType < 25)
return 0;
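// Error codes 25-31 flag FED/TBM status words rather than pixel data: 25 invalid ROC,
// 26 gap word, 27 dummy word, 28 FIFO nearly full, 29 channel timeout, 30 TBM error
// trailer (remapped to 40 on overflow), 31 event number mismatch.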
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> sipixelconstants::OMIT_ERR_shift) & sipixelconstants::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
int stateMatch_bits = 4;
int stateMatch_shift = 8;
uint32_t stateMatch_mask = ~(~uint32_t(0) << stateMatch_bits);
int stateMatch = (errorWord >> stateMatch_shift) & stateMatch_mask;
if (stateMatch != 1 && stateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (stateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
case 25:
case 30:
case 31:
case 36:
case 40: {
uint32_t roc = 1;
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 29: {
int chanNmbr = 0;
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 37:
case 38: {
uint32_t roc = sipixelconstants::getROC(errWord);
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
__global__ void RawToDigi_kernel(const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
cms::cuda::SimpleVector<SiPixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
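// Grid-stride loop: each thread decodes several 32-bit words; two consecutive words share
// one fedIds entry (hence fedIds[gIndex / 2]), matching the packing done in WordFedAppender.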
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
// initialize up front (there are many 'continue' exits below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = gpuClustering::invalidModuleId;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = sipixelconstants::getLink(ww); // Extract link
uint32_t roc = sipixelconstants::getROC(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(SiPixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.rawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0;
int side = 0, panel = 0, module = 0;
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
side = (panel == 1) ? -1 : 1;
}
// ***special case of layer 1 to be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
uint32_t col = sipixelconstants::getCol(ww);
uint32_t row = sipixelconstants::getRow(ww);
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
uint32_t dcol = sipixelconstants::getDCol(ww);
uint32_t pxid = sipixelconstants::getPxId(ww);
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = sipixelconstants::getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ cluStart, uint32_t *__restrict__ moduleStart) {
assert(gpuClustering::maxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to maxHitsInModule()
for (int i = first, iend = gpuClustering::maxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = ::min(gpuClustering::maxHitsInModule(), cluStart[i]);
}
__shared__ uint32_t ws[32];
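// blockPrefixScan handles at most one block's worth of elements, so the moduleStart
// array is scanned in two chunks (the first 1024 entries, then the remainder) and the
// second chunk is shifted by the running total stored in moduleStart[1024].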
cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
cms::cuda::blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::maxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = ::min(gpuClustering::maxHitsInModule(), cluStart[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::maxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
assert(moduleStart[i] >= moduleStart[i - 1]);
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::maxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
}
// Interface to outside
void SiPixelRawToClusterGPUKernel::makeClustersAsync(bool isRun2,
const SiPixelClusterThresholds clusterThresholds,
const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
SiPixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
bool useQualityInfo,
bool includeErrors,
bool debug,
hipStream_t stream) {
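// Overall flow: RawToDigi_kernel unpacks the FED words into digis (and optionally errors),
// gpuCalibPixel::calibDigis applies the gain calibration, countModules/findClus build the
// per-module clusters, clusterChargeCut drops low-charge clusters, and fillHitsModuleStart
// builds the cluster-offset table used later by the rechit producer.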
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << pixelgpudetails::MAX_FED_WORDS << std::endl;
#endif
digis_d = SiPixelDigisCUDA(pixelgpudetails::MAX_FED_WORDS, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(pixelgpudetails::MAX_FED_WORDS, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::maxNumModules, stream);
nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(2, stream);
if (wordCounter) // protect in case of empty event....
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
assert(0 == wordCounter % 2);
// wordCounter is the total number of words in each event to be transferred to the device
auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(
hipMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), hipMemcpyDefault, stream));
cudaCheck(hipMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, hipMemcpyDefault, stream));
// Launch rawToDigi kernel
hipLaunchKernelGGL(( RawToDigi_kernel), dim3(blocks), dim3(threadsPerBlock), 0, stream,
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
digis_d.pdigi(),
digis_d.rawIdArr(),
digis_d.moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
int threadsPerBlock = 256;
int blocks =
(::max(int(wordCounter), int(gpuClustering::maxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( gpuCalibPixel::calibDigis), dim3(blocks), dim3(threadsPerBlock), 0, stream, isRun2,
digis_d.moduleInd(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
hipLaunchKernelGGL(( countModules), dim3(blocks), dim3(threadsPerBlock), 0, stream,
digis_d.moduleInd(), clusters_d.moduleStart(), digis_d.clus(), wordCounter);
cudaCheck(hipGetLastError());
// read the number of modules into a data member, used by getProduct())
cudaCheck(hipMemcpyAsync(
&(nModules_Clusters_h[0]), clusters_d.moduleStart(), sizeof(uint32_t), hipMemcpyDefault, stream));
threadsPerBlock = 256;
blocks = maxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
hipLaunchKernelGGL(( findClus), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d.moduleInd(),
digis_d.xx(),
digis_d.yy(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
// apply charge cut
hipLaunchKernelGGL(( clusterChargeCut), dim3(blocks), dim3(threadsPerBlock), 0, stream, clusterThresholds,
digis_d.moduleInd(),
digis_d.adc(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(hipGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
// MUST be ONE block
hipLaunchKernelGGL(( fillHitsModuleStart), dim3(1), dim3(1024), 0, stream, clusters_d.clusInModule(), clusters_d.clusModuleStart());
// last element holds the number of all clusters
cudaCheck(hipMemcpyAsync(&(nModules_Clusters_h[1]),
clusters_d.clusModuleStart() + gpuClustering::maxNumModules,
sizeof(uint32_t),
hipMemcpyDefault,
stream));
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
| a7ab27b056b7a4ba06e4e7e32360c7d9acf5e930.cu | /* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
* Finaly the Output of RawToDigi data is given to pixelClusterizer
**/
// C++ includes
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
// CUDA includes
#include <cuda_runtime.h>
// CMSSW includes
#include "CUDACore/cudaCheck.h"
#include "CUDACore/device_unique_ptr.h"
#include "CUDACore/host_unique_ptr.h"
#include "CUDADataFormats/gpuClusteringConstants.h"
#include "CondFormats/SiPixelROCsStatusAndMapping.h"
#include "DataFormats/DetId.h"
#include "DataFormats/FEDNumbering.h"
#include "DataFormats/PixelSubdetector.h"
#include "DataFormats/SiPixelDigiConstants.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
#include "gpuCalibPixel.h"
#include "gpuClusterChargeCut.h"
#include "gpuClustering.h"
namespace pixelgpudetails {
// number of words for all the FEDs
constexpr uint32_t MAX_FED_WORDS = pixelgpudetails::MAX_FED * pixelgpudetails::MAX_WORD;
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender() {
word_ = cms::cuda::make_host_noncached_unique<unsigned int[]>(MAX_FED_WORDS, cudaHostAllocWriteCombined);
fedId_ = cms::cuda::make_host_noncached_unique<unsigned char[]>(MAX_FED_WORDS, cudaHostAllocWriteCombined);
}
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const uint32_t *src,
unsigned int length) {
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - FEDNumbering::MINSiPixeluTCAFEDID, length / 2);
}
////////////////////
__device__ bool isBarrel(uint32_t rawId) {
return (PixelSubdetector::PixelBarrel == ((rawId >> DetId::kSubdetOffset) & DetId::kSubdetMask));
}
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelROCsStatusAndMapping *cablingMap,
uint8_t fed,
uint32_t link,
uint32_t roc) {
uint32_t index = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
pixelgpudetails::DetIdGPU detId = {
cablingMap->rawId[index], cablingMap->rocInDet[index], cablingMap->moduleId[index]};
return detId;
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to pixelgpudetails::global pixel
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
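// map ROC-local (row, col) to module-global coordinates: slopeRow/slopeCol encode whether the ROC is flipped,
// and the offsets place it within the module's 2x8 ROC layout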
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
if (side == -1) { // panel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} else { // panel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
// inside frameConversion row: gRow, column: gCol
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
switch (status) {
case (1): {
if (debug)
printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
/// row and column in ROC representation
return ((rocRow < pixelgpudetails::numRowsInRoc) & (rocCol < pixelgpudetails::numColsInRoc));
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t checkROC(uint32_t errorWord,
uint8_t fedId,
uint32_t link,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint8_t errorType = (errorWord >> sipixelconstants::ROC_shift) & sipixelconstants::ERROR_mask;
if (errorType < 25)
return 0;
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> sipixelconstants::OMIT_ERR_shift) & sipixelconstants::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
int stateMatch_bits = 4;
int stateMatch_shift = 8;
uint32_t stateMatch_mask = ~(~uint32_t(0) << stateMatch_bits);
int stateMatch = (errorWord >> stateMatch_shift) & stateMatch_mask;
if (stateMatch != 1 && stateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (stateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
case 25:
case 30:
case 31:
case 36:
case 40: {
uint32_t roc = 1;
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 29: {
int chanNmbr = 0;
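// timeout error: reconstruct the affected channel number from the status bits, then use it as the link to look up the rawId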
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 37:
case 38: {
uint32_t roc = sipixelconstants::getROC(errWord);
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
__global__ void RawToDigi_kernel(const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
cms::cuda::SimpleVector<SiPixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
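// grid-stride loop: each thread decodes words first, first + blockDim.x*gridDim.x, ...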
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
// initialize (too many continue statements below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = gpuClustering::invalidModuleId;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = sipixelconstants::getLink(ww); // Extract link
uint32_t roc = sipixelconstants::getROC(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(SiPixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.rawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0;
int side = 0, panel = 0, module = 0;
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
side = (panel == 1) ? -1 : 1;
}
// ***special case of layer 1 to be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
uint32_t col = sipixelconstants::getCol(ww);
uint32_t row = sipixelconstants::getRow(ww);
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
uint32_t dcol = sipixelconstants::getDCol(ww);
uint32_t pxid = sipixelconstants::getPxId(ww);
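// a double column (dcol) spans two pixel columns; pxid encodes the position within it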
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = sipixelconstants::getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ cluStart, uint32_t *__restrict__ moduleStart) {
assert(gpuClustering::maxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to maxHitsInModule()
for (int i = first, iend = gpuClustering::maxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = std::min(gpuClustering::maxHitsInModule(), cluStart[i]);
}
__shared__ uint32_t ws[32];
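// prefix-scan the per-module counts in two 1024-wide block scans; the loop below then adds the first chunk's total to the second chunk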
cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
cms::cuda::blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::maxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = std::min(gpuClustering::maxHitsInModule(), cluStart[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::maxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
assert(moduleStart[i] >= moduleStart[i - 1]);
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::maxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
}
// Interface to outside
void SiPixelRawToClusterGPUKernel::makeClustersAsync(bool isRun2,
const SiPixelClusterThresholds clusterThresholds,
const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
SiPixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
bool useQualityInfo,
bool includeErrors,
bool debug,
cudaStream_t stream) {
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << pixelgpudetails::MAX_FED_WORDS << std::endl;
#endif
digis_d = SiPixelDigisCUDA(pixelgpudetails::MAX_FED_WORDS, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(pixelgpudetails::MAX_FED_WORDS, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::maxNumModules, stream);
nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(2, stream);
if (wordCounter) // protect in case of empty event....
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
assert(0 == wordCounter % 2);
// wordCounter is the total number of words in each event to be transferred to the device
auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(
cudaMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), cudaMemcpyDefault, stream));
cudaCheck(cudaMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, cudaMemcpyDefault, stream));
// Launch rawToDigi kernel
RawToDigi_kernel<<<blocks, threadsPerBlock, 0, stream>>>(
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
digis_d.pdigi(),
digis_d.rawIdArr(),
digis_d.moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
int threadsPerBlock = 256;
int blocks =
(std::max(int(wordCounter), int(gpuClustering::maxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
gpuCalibPixel::calibDigis<<<blocks, threadsPerBlock, 0, stream>>>(isRun2,
digis_d.moduleInd(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
countModules<<<blocks, threadsPerBlock, 0, stream>>>(
digis_d.moduleInd(), clusters_d.moduleStart(), digis_d.clus(), wordCounter);
cudaCheck(cudaGetLastError());
// read the number of modules into a data member, used by getProduct()
cudaCheck(cudaMemcpyAsync(
&(nModules_Clusters_h[0]), clusters_d.moduleStart(), sizeof(uint32_t), cudaMemcpyDefault, stream));
threadsPerBlock = 256;
blocks = maxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
findClus<<<blocks, threadsPerBlock, 0, stream>>>(digis_d.moduleInd(),
digis_d.xx(),
digis_d.yy(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
// apply charge cut
clusterChargeCut<<<blocks, threadsPerBlock, 0, stream>>>(clusterThresholds,
digis_d.moduleInd(),
digis_d.adc(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(cudaGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
// MUST be ONE block
fillHitsModuleStart<<<1, 1024, 0, stream>>>(clusters_d.clusInModule(), clusters_d.clusModuleStart());
// last element holds the number of all clusters
cudaCheck(cudaMemcpyAsync(&(nModules_Clusters_h[1]),
clusters_d.clusModuleStart() + gpuClustering::maxNumModules,
sizeof(uint32_t),
cudaMemcpyDefault,
stream));
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
|
162ed45c41fe98293eb1ccf3811f3e4a3bfd6ee2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
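// grid-stride loop helper: the launched threads cooperatively cover all n elements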
// Note: this implementation originates from the Caffe2 ROIAlignRotated Op
// and PyTorch ROIAlign (non-rotated) Op implementations.
// The key difference between this implementation and those ones is
// we don't do "legacy offset" in this version, as there aren't many previous
// works, if any, using the "legacy" ROIAlignRotated Op.
// This would make the interface a bit cleaner.
namespace detectron2 {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
const int height,
const int width,
T y,
T x) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y < 0) {
y = 0;
}
if (x < 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
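// each weight is the product of the distances to the opposite corner
// (w1 pairs with v1 at (y_low, x_low), ..., w4 with v4 at (y_high, x_high))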
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y < 0) {
y = 0;
}
if (x < 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
} // namespace
template <typename T>
__global__ void RoIAlignRotatedForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* current_roi = rois + n * 6;
int roi_batch_ind = current_roi[0];
// Do not use rounding; this implementation detail is critical
// ROIAlignRotated supports align == true, i.e., continuous coordinate
// by default, thus the 0.5 offset
T offset = (T)0.5;
T roi_center_w = current_roi[1] * spatial_scale - offset;
T roi_center_h = current_roi[2] * spatial_scale - offset;
T roi_width = current_roi[3] * spatial_scale;
T roi_height = current_roi[4] * spatial_scale;
T theta = current_roi[5] * M_PI / 180.0;
T cos_theta = cos(theta);
T sin_theta = sin(theta);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
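// sampling_ratio <= 0 selects an adaptive grid of ceil(roi_size / pooled_size) samples per bin in each direction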
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
// We do average (integral) pooling inside a bin
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T y = yy * cos_theta - xx * sin_theta + roi_center_h;
T x = yy * sin_theta + xx * cos_theta + roi_center_w;
T val = bilinear_interpolate(offset_input, height, width, y, x);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__global__ void RoIAlignRotatedBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* current_roi = rois + n * 6;
int roi_batch_ind = current_roi[0];
// Do not use rounding; this implementation detail is critical
// ROIAlignRotated supports align == true, i.e., continuous coordinate
// by default, thus the 0.5 offset
T offset = (T)0.5;
T roi_center_w = current_roi[1] * spatial_scale - offset;
T roi_center_h = current_roi[2] * spatial_scale - offset;
T roi_width = current_roi[3] * spatial_scale;
T roi_height = current_roi[4] * spatial_scale;
T theta = current_roi[5] * M_PI / 180.0;
T cos_theta = cos(theta);
T sin_theta = sin(theta);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T y = yy * cos_theta - xx * sin_theta + roi_center_h;
T x = yy * sin_theta + xx * cos_theta + roi_center_w;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
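// scatter the four weighted gradient contributions with atomics, since bins from
// different ROIs (and threads) may touch the same input pixel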
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
at::Tensor ROIAlignRotated_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlignRotated_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
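// the grid is capped at 4096 blocks; the grid-stride loop inside the kernel covers any remaining elements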
if (output.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(
input.scalar_type(), "ROIAlignRotated_forward", [&] {
hipLaunchKernelGGL(( RoIAlignRotatedForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
hipDeviceSynchronize();
AT_CUDA_CHECK(hipGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlignRotated_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(
grad.scalar_type(), "ROIAlignRotated_backward", [&] {
hipLaunchKernelGGL(( RoIAlignRotatedBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
} // namespace detectron2
| 162ed45c41fe98293eb1ccf3811f3e4a3bfd6ee2.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// Note: this implementation originates from the Caffe2 ROIAlignRotated Op
// and PyTorch ROIAlign (non-rotated) Op implementations.
// The key difference between this implementation and those ones is
// we don't do "legacy offset" in this version, as there aren't many previous
// works, if any, using the "legacy" ROIAlignRotated Op.
// This would make the interface a bit cleaner.
namespace detectron2 {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
const int height,
const int width,
T y,
T x) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y < 0) {
y = 0;
}
if (x < 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y < 0) {
y = 0;
}
if (x < 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
} // namespace
template <typename T>
__global__ void RoIAlignRotatedForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* rois,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* current_roi = rois + n * 6;
int roi_batch_ind = current_roi[0];
// Do not use rounding; this implementation detail is critical
// ROIAlignRotated supports align == true, i.e., continuous coordinate
// by default, thus the 0.5 offset
T offset = (T)0.5;
T roi_center_w = current_roi[1] * spatial_scale - offset;
T roi_center_h = current_roi[2] * spatial_scale - offset;
T roi_width = current_roi[3] * spatial_scale;
T roi_height = current_roi[4] * spatial_scale;
T theta = current_roi[5] * M_PI / 180.0;
T cos_theta = cos(theta);
T sin_theta = sin(theta);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
// We do average (integral) pooling inside a bin
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T y = yy * cos_theta - xx * sin_theta + roi_center_h;
T x = yy * sin_theta + xx * cos_theta + roi_center_w;
T val = bilinear_interpolate(offset_input, height, width, y, x);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__global__ void RoIAlignRotatedBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* current_roi = rois + n * 6;
int roi_batch_ind = current_roi[0];
// Do not use rounding; this implementation detail is critical
// ROIAlignRotated supports align == true, i.e., continuous coordinate
// by default, thus the 0.5 offset
T offset = (T)0.5;
T roi_center_w = current_roi[1] * spatial_scale - offset;
T roi_center_h = current_roi[2] * spatial_scale - offset;
T roi_width = current_roi[3] * spatial_scale;
T roi_height = current_roi[4] * spatial_scale;
T theta = current_roi[5] * M_PI / 180.0;
T cos_theta = cos(theta);
T sin_theta = sin(theta);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T y = yy * cos_theta - xx * sin_theta + roi_center_h;
T x = yy * sin_theta + xx * cos_theta + roi_center_w;
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotatedBackward
at::Tensor ROIAlignRotated_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlignRotated_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(
input.scalar_type(), "ROIAlignRotated_forward", [&] {
RoIAlignRotatedForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
cudaDeviceSynchronize();
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlignRotated_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(
grad.scalar_type(), "ROIAlignRotated_backward", [&] {
RoIAlignRotatedBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
} // namespace detectron2
|
48268a0f68f0c830edae42bf7a089ebb4644221e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Gates.h"
#include <stdio.h>
// Gates_cu struct contains all the computation parameters
#define GRID_SIZE 900
#define MAX_TILE 1024
#define UNROLL 64
#define CASES 1
#ifdef __cplusplus
extern "C" {
#endif
extern __device__ float pdf_m[36][10] = {
{ 0.0000, 0.0040, 0.0080, 0.0120, 0.0160, 0.0199, 0.0239, 0.0279, 0.0319, 0.0359 },
{ 0.0398, 0.0438, 0.0478, 0.0517, 0.0557, 0.0596, 0.0636, 0.0675, 0.0714, 0.0753 },
{ 0.0793, 0.0832, 0.0871, 0.0910, 0.0948, 0.0987, 0.1026, 0.1064, 0.1103, 0.1141 },
{ 0.1179, 0.1217, 0.1255, 0.1293, 0.1331, 0.1368, 0.1406, 0.1443, 0.1480, 0.1517 },
{ 0.1554, 0.1591, 0.1628, 0.1664, 0.1700, 0.1736, 0.1772, 0.1808, 0.1844, 0.1879 },
{ 0.1915, 0.1950, 0.1985, 0.2019, 0.2054, 0.2088, 0.2123, 0.2157, 0.2190, 0.2224 },
{ 0.2257, 0.2291, 0.2324, 0.2357, 0.2389, 0.2422, 0.2454, 0.2486, 0.2517, 0.2549 },
{ 0.2580, 0.2611, 0.2642, 0.2673, 0.2704, 0.2734, 0.2764, 0.2794, 0.2823, 0.2852 },
{ 0.2881, 0.2910, 0.2939, 0.2967, 0.2995, 0.3023, 0.3051, 0.3078, 0.3106, 0.3133 },
{ 0.3159, 0.3186, 0.3212, 0.3238, 0.3264, 0.3289, 0.3315, 0.3340, 0.3365, 0.3389 },
{ 0.3413, 0.3438, 0.3461, 0.3485, 0.3508, 0.3531, 0.3554, 0.3577, 0.3599, 0.3621 },
{ 0.3643, 0.3665, 0.3686, 0.3708, 0.3729, 0.3749, 0.3770, 0.3790, 0.3810, 0.3830 },
{ 0.3849, 0.3869, 0.3888, 0.3907, 0.3925, 0.3944, 0.3962, 0.3980, 0.3997, 0.4015 },
{ 0.4032, 0.4049, 0.4066, 0.4082, 0.4099, 0.4115, 0.4131, 0.4147, 0.4162, 0.4177 },
{ 0.4192, 0.4207, 0.4222, 0.4236, 0.4251, 0.4265, 0.4279, 0.4292, 0.4306, 0.4319 },
{ 0.4332, 0.4345, 0.4357, 0.4370, 0.4382, 0.4394, 0.4406, 0.4418, 0.4429, 0.4441 },
{ 0.4452, 0.4463, 0.4474, 0.4484, 0.4495, 0.4505, 0.4515, 0.4525, 0.4535, 0.4545 },
{ 0.4554, 0.4564, 0.4573, 0.4582, 0.4591, 0.4599, 0.4608, 0.4616, 0.4625, 0.4633 },
{ 0.4641, 0.4649, 0.4656, 0.4664, 0.4671, 0.4678, 0.4686, 0.4693, 0.4699, 0.4706 },
{ 0.4713, 0.4719, 0.4726, 0.4732, 0.4738, 0.4744, 0.4750, 0.4756, 0.4761, 0.4767 },
{ 0.4772, 0.4778, 0.4783, 0.4788, 0.4793, 0.4798, 0.4803, 0.4808, 0.4812, 0.4817 },
{ 0.4821, 0.4826, 0.4830, 0.4834, 0.4838, 0.4842, 0.4846, 0.4850, 0.4854, 0.4857 },
{ 0.4861, 0.4864, 0.4868, 0.4871, 0.4875, 0.4878, 0.4881, 0.4884, 0.4887, 0.4890 },
{ 0.4893, 0.4896, 0.4898, 0.4901, 0.4904, 0.4906, 0.4909, 0.4911, 0.4913, 0.4916 },
{ 0.4918, 0.4920, 0.4922, 0.4925, 0.4927, 0.4929, 0.4931, 0.4932, 0.4934, 0.4936 },
{ 0.4938, 0.4940, 0.4941, 0.4943, 0.4945, 0.4946, 0.4948, 0.4949, 0.4951, 0.4952 },
{ 0.4953, 0.4955, 0.4956, 0.4957, 0.4959, 0.4960, 0.4961, 0.4962, 0.4963, 0.4964 },
{ 0.4965, 0.4966, 0.4967, 0.4968, 0.4969, 0.4970, 0.4971, 0.4972, 0.4973, 0.4974 },
{ 0.4974, 0.4975, 0.4976, 0.4977, 0.4977, 0.4978, 0.4979, 0.4979, 0.4980, 0.4981 },
{ 0.4981, 0.4982, 0.4982, 0.4983, 0.4984, 0.4984, 0.4985, 0.4985, 0.4986, 0.4986 },
{ 0.4987, 0.4987, 0.4987, 0.4988, 0.4988, 0.4989, 0.4989, 0.4989, 0.4990, 0.4990 },
{ 0.4990, 0.4991, 0.4991, 0.4991, 0.4992, 0.4992, 0.4992, 0.4992, 0.4993, 0.4993 },
{ 0.4993, 0.4993, 0.4994, 0.4994, 0.4994, 0.4994, 0.4994, 0.4995, 0.4995, 0.4995 },
{ 0.4995, 0.4995, 0.4995, 0.4996, 0.4996, 0.4996, 0.4996, 0.4996, 0.4996, 0.4997 },
{ 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4998 },
{ 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998 },
};
extern __device__ float row[36] = { 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5 };
extern __device__ float col[10] = { 0.00, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09 };
#ifdef __cplusplus
}
#endif
texture<int, 1, hipReadModeElementType> count_ter;
texture<int, 1, hipReadModeElementType> start_ter;
//texture<int, 1, hipReadModeElementType> edges_ter;
//texture<int, 1, hipReadModeElementType> sort_ter;
texture<float, 1, hipReadModeElementType> k_m_ter;
typedef struct {
float mu;
float sigma;
}mu_sigma_struct;
__device__ float integrate1(float beta){
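// standard-normal CDF Phi(beta) via table lookup: pdf_m stores Phi(x) - 0.5 for x in [0.00, 3.59];
// the result is mirrored for negative beta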
bool positive;
float b = abs(beta);
float result;
int flag = 0;
int i, j;
for (i = 0; i < 36; ++i){
for (j = 0; j < 10; ++j){
if (b >(row[i] + col[j]))
continue;
else{
flag = 1;
break;
}
}
if (flag)
break;
}
if ((i - 1) < 0 || (j - 1) < 0)
result = pdf_m[i][j];
else if (i > 35 || j > 35)
result = pdf_m[i - 1][j - 1];
else
result = 0.5*(pdf_m[i][j] + pdf_m[i - 1][j - 1]);
if (beta >= 0){
return result + 0.5;
}
else{
return 0.5 - result;
}
}
__device__ float integrate(float beta){
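// numerical alternative: integrate the standard-normal density from -5 to beta with a fixed 0.01 step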
float step = 0.01f;
float down_idx = -5;
int up_idx = (int)(beta - down_idx) / step;
float micro = down_idx;
float sum = 0.0f;
float tmp;
for (int ii = 0; ii < up_idx; ii++){
tmp = pow(micro, 2) / 2;
sum += pow(2.718281828f, -tmp) * step;
micro += step;
}
return sum / sqrt(2 * 3.141592654f);
}
__device__ void sum_function(mu_sigma_struct max_strc, Gates_cu* currentG, float* k_para_matrix, int no_of_pc, \
int max_k_idx){
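// SUM step of SSTA: the gate's mean delay adds to the MAX of the fan-in arrivals;
// sigma is the root-sum-square of the combined per-principal-component sensitivities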
currentG->delay_mu = currentG->gate_mu + max_strc.mu;
int offset = 2 * no_of_pc;
int g_id_tmp = currentG->id;
float tmp = 0.0f;
float tmp2 = 0.0f;
float sigma = 0.0f;
for (int i = 0; i < offset; i++)
{
//tmp = tex1Dfetch(k_m_ter, (g_id_tmp * 2 * no_of_pc + i)) + tex1Dfetch(k_m_ter, (max_k_idx * 2 * no_of_pc + i));
tmp = k_para_matrix[g_id_tmp * 2 * no_of_pc + i] + k_para_matrix[max_k_idx * 2 * no_of_pc + i];
//tmp = l_k_m[idx * 40 + i] + l_k_m[idx * 40 + i];
sigma += pow(tmp, 2);
}
currentG->delay_sigma = sqrt(sigma);
}
__device__ void max_function(Gates_cu* gates_t, Gates_cu* currentG, int* edges_t, int no_of_pc, float* k_para_matrix, \
int bidx, int current_level){
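// MAX step of SSTA: fold the fan-in arrival times pairwise. Clearly dominated inputs are skipped
// (3-sigma test), near-identical ones keep the larger mean, otherwise Clark's moment-matching
// approximation (step 3) is applied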
int e_idx = edges_t[currentG->start_in];
float mu_1, mu_2, sigma_1, sigma_2;
int offset = 2 * no_of_pc;
int max_k_idx;
mu_sigma_struct max_strc;
int current_k_idx;
max_strc.mu = gates_t[e_idx].delay_mu;
max_strc.sigma = gates_t[e_idx].delay_sigma;
max_k_idx = gates_t[e_idx].id;
for (int i = currentG->start_in + 1; i < currentG->start_in + currentG->no_of_in; i++){
e_idx = edges_t[i];
mu_1 = max_strc.mu;
sigma_1 = max_strc.sigma;
mu_2 = gates_t[e_idx].delay_mu;
sigma_2 = gates_t[e_idx].delay_sigma;
current_k_idx = gates_t[e_idx].id;
if (mu_1 - 3 * sigma_1 > mu_2 + 3 * sigma_2)
{
continue;
}
if (mu_1 + 3 * sigma_1 < mu_2 - 3 * sigma_2)
{
max_strc.mu = mu_2;
max_strc.sigma = sigma_2;
max_k_idx = current_k_idx;
continue;
}
//step 2
float co_variance = 0.0f;
float correlation = 0.0f;
for (int j = 0; j < offset; j++){
//co_variance += tex1Dfetch(k_m_ter, (max_k_idx * offset + j)) * tex1Dfetch(k_m_ter, (current_k_idx * offset + j));
co_variance += k_para_matrix[max_k_idx * offset + j] * k_para_matrix[current_k_idx * offset + j];
//co_variance += k_l[fake + j] * k_l[fake + j];
}
correlation = co_variance / (sigma_1 * sigma_2);
if (correlation > 0.99 && abs(sigma_1 - sigma_2) < 0.1){
if (mu_1 > mu_2){
continue;
}
else{
max_strc.mu = mu_2;
max_strc.sigma = sigma_2;
max_k_idx = current_k_idx;
continue;
}
}
//step 3
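// Clark's moment matching for max(X1, X2) of correlated Gaussians:
// alpha^2 = sigma1^2 + sigma2^2 - 2*cov, beta = (mu1 - mu2) / alpha,
// mu_max = mu1*Phi(beta) + mu2*Phi(-beta) + alpha*phi(beta),
// E[max^2] = (mu1^2 + sigma1^2)*Phi(beta) + (mu2^2 + sigma2^2)*Phi(-beta) + (mu1 + mu2)*alpha*phi(beta)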
float alpha = sqrt(abs(pow(sigma_1, 2) + pow(sigma_2, 2) - 2 * co_variance));
float beta = (mu_1 - mu_2) / alpha;
float phi = pow(2.718281828f, -beta*beta / 2) / sqrt(2 * 3.141592654f);
float phi_intg = integrate1(beta);
float phi_intg_m = integrate1(-beta);
float sigma_3, mu_3;
mu_3 = mu_1 * phi_intg + mu_2 * phi_intg_m + alpha * phi;
float sigma_tmp = (pow(mu_1, 2) + pow(sigma_1, 2)) * phi_intg + (pow(mu_2, 2) + pow(sigma_2, 2)) * phi_intg_m + (mu_1 + mu_2) * alpha * phi - mu_3*mu_3;
sigma_3 = sqrt(abs(sigma_tmp));
/*
step 4
float S0 = 0.0f;
for (int j = 0; j < offset; j++){
float r_1 = tex2D(k_m_ter, max_k_idx, j);
float r_2 = tex2D(k_m_ter, current_k_idx, j);
float ar_tmp = (sigma_1*r_1*phi_intg + sigma_2*r_2*phi_intg_m) / sigma_3;
max_k[j] = ar_tmp;
S0 += ar_tmp * ar_tmp;
}
for (int j = 0; j < offset; j++){
max_k[j] = max_k[j] * sigma_3 / sqrt(abs(S0));
}
*/
max_strc.mu = mu_3;
max_strc.sigma = sigma_3;
}
sum_function(max_strc, currentG, k_para_matrix, no_of_pc, max_k_idx);
}
__global__ void PCA(Gates_cu* gates, int no_of_gates, int* sort, int* edges, float* k_param, \
int* l_count, int* l_start, int current_level, int no_of_pc, int max_level){
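// one launch per topological level: gates in the current level only depend on gates from
// earlier levels, whose arrival times are already final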
Gates_cu tmp;
//current_level=0;
int i = threadIdx.x;// +blockIdx.x*blockDim.x;
int idx = threadIdx.x;
int b = blockIdx.x;
//while (current_level < max_level){
int count; int start;
count = l_count[current_level];
start = l_start[current_level];
if (i < count){
tmp = gates[start + i];
if (tmp.is_input)
{
tmp.delay_mu = tmp.gate_mu;
tmp.delay_sigma = tmp.gate_sigma;
}
else{
//--max--//
max_function(gates, &tmp, edges, no_of_pc, k_param, b, current_level); //no change supposed to be on gates[i]
//printf("%f\n", l_gates[idx].delay_sigma);
//--sum--//
//sum_function(max_strc, &l_gates[i], k_param, no_of_pc, gates, edges); // gates[i] param should changes
}
//l_gates[tmp.id%MAX_TILE] = tmp;
gates[start + i] = tmp;
//test(gates, &l_gates[i], edges);
//gates[b][sort[start + i]] = l_gates[idx];
//gates[sort[start + i]] = tmp;
}
//current_level++;
//__syncthreads();
//}
}
__global__ void timing(Gates_cu* gates, int no_of_pc, float* eigen_v, float** eigen_vec, int x_grids, int no_of_gates)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
if (k < no_of_gates){
float sigma_of_delay = 0.0f;
gates[k].gate_mu = 0.0f; // This is to init mu
gates[k].gate_sigma = 0.0f; // This is to init sigma
gates[k].gate_mu = gates[k].delay;
int row = (int)floor((float)(gates[k].y / GRID_SIZE));
int column = (int)floor((float)(gates[k].x / GRID_SIZE));
int i_of_j = row*x_grids + column;
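// map the gate's (x, y) location to a cell of the variation grid; the cell index selects the row
// of the eigenvector matrix, so nearby gates pick up correlated variation components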
float k_tmp = 0.0f;
for (int j = 0; j < no_of_pc; j++)
{
if (eigen_v[j] < 0){
eigen_v[j] = 0;
}
// L = sqrt(eigen_value) * eigen_vector * sigma; dRdL is taken as a constant for a specific gate size (to be updated)
k_tmp = (0.15f / 3)* gates[k].delay * sqrt(eigen_v[j]) * eigen_vec[i_of_j][j];// *sigma_of_L;
sigma_of_delay += k_tmp * k_tmp;
//gates[k].k_param[j] = k_tmp;
}
for (int j = 0; j < no_of_pc; j++)
{
// W = sqrt(eigen_value) * eigen_vector * sigma; dRdW is taken as a constant for a specific gate size (to be updated)
k_tmp = -(0.08f / 3)* gates[k].delay * sqrt(eigen_v[j]) * eigen_vec[i_of_j][j];// *sigma_of_W;
sigma_of_delay += k_tmp * k_tmp;
//gates[k].k_param[j + no_of_pc] = k_tmp;
}
/*----get gate[i]'s sigma----*/
gates[k].gate_sigma = sqrt(sigma_of_delay);
}
}
extern "C" Gates_cu* cuSSTA(Gates_cu* gates, int no_of_gates, int* sort, int* edges, int no_of_edges, int no_of_pc, float *eigen_values, \
float **eigen_vectors, int x_grid, float* k_param, int* l_count, int* l_start, int max_level, Gates* gates_t){
hipError_t cudaStatus;
int* c_sort;
hipMallocManaged(&c_sort, sizeof(int) * no_of_gates);
for (int i = 0; i < no_of_gates; ++i){
c_sort[i] = sort[i];
}
float* k_parameters;
hipMallocManaged(&k_parameters, sizeof(float) * no_of_gates * no_of_pc * 2);
for (int i = 0; i < no_of_gates * no_of_pc * 2; ++i){
k_parameters[i] = k_param[i];
}
int* c_l_count;
hipMallocManaged(&c_l_count, sizeof(int) * max_level);
for (int i = 0; i < max_level; ++i)
c_l_count[i] = l_count[i];
int* c_l_start;
hipMallocManaged(&c_l_start, sizeof(int) * max_level);
for (int i = 0; i < max_level; ++i)
c_l_start[i] = l_start[i];
Gates_cu* gates_cu1;
Gates_cu** gates_cu;
Gates_cu* g_str;
if (CASES == 1){
hipMallocManaged(&gates_cu1, no_of_gates*sizeof(Gates_cu));
hipMallocManaged(&g_str, no_of_gates*sizeof(Gates_cu));
for (int j = 0; j < no_of_gates; ++j){
gates_cu1[j] = gates[j];
}
}
else{
hipMallocManaged(&gates_cu, CASES * sizeof(Gates_cu*));
for (int i = 0; i < CASES; ++i){
hipMallocManaged(&gates_cu[i], no_of_gates * sizeof(Gates_cu));
}
for (int i = 0; i < CASES; ++i){
for (int j = 0; j < no_of_gates; ++j){
gates_cu[i][j] = gates[j];
}
}
}
float* eigen_v;
float** eigen_vec;
int* edges_cu;
hipMallocManaged(&edges_cu, no_of_edges * sizeof(int));
hipMallocManaged(&eigen_v, no_of_pc * sizeof(float));
hipMallocManaged(&eigen_vec, no_of_pc * sizeof(float*));
for (int i = 0; i < no_of_edges; ++i){
edges_cu[i] = edges[i];
}
for (int i = 0; i < no_of_pc; ++i){
eigen_v[i] = eigen_values[i];
}
for (int i = 0; i < no_of_pc; ++i){
hipMallocManaged(&eigen_vec[i], no_of_pc * sizeof(float));
for (int j = 0; j < no_of_pc; ++j){
eigen_vec[i][j] = eigen_vectors[i][j];
}
}
for (int i = 0; i < no_of_pc; ++i){
assert(eigen_v[i] == eigen_values[i]);
for (int j = 0; j < no_of_pc; ++j)
assert(eigen_vectors[i][j] == eigen_vec[i][j]);
}
/////////////////////////////////////////////////////////////////////////////////////////
dim3 blockn = (no_of_gates - 1) / 1024 + 1;
dim3 threadn = 1024;
//hipDeviceSetCacheConfig(hipFuncCachePreferL1);
if (CASES == 1){
timing << < blockn, threadn >> > (gates_cu1, no_of_pc, eigen_v, eigen_vec, x_grid, no_of_gates);
hipDeviceSynchronize();
}
else{
for (int i = 0; i < CASES; ++i){
timing << < blockn, threadn >> > (gates_cu[i], no_of_pc, eigen_v, eigen_vec, x_grid, no_of_gates);
hipDeviceSynchronize();
}
}
float time_elapsed = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
int current_level = 0;
while (current_level < max_level){
PCA << <CASES, MAX_TILE >> > (gates_cu1, no_of_gates, c_sort, edges_cu, k_parameters, \
c_l_count, c_l_start, current_level, no_of_pc, max_level);
hipDeviceSynchronize();
current_level++;
}
hipEventRecord(stop, 0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
hipEventElapsedTime(&time_elapsed, start, stop);
printf("GPU running%f(ms)\n", time_elapsed);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "!!!! GPU program execution error in 2: cuda Error=%d,(%s)\n", cudaStatus, hipGetErrorString(cudaStatus));
}
return gates_cu1;
}
| 48268a0f68f0c830edae42bf7a089ebb4644221e.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Gates.h"
#include <stdio.h>
// Gates_cu struct contains all the computation parameters
#define GRID_SIZE 900
#define MAX_TILE 1024
#define UNROLL 64
#define CASES 1
#ifdef __cplusplus
extern "C" {
#endif
extern __device__ float pdf_m[36][10] = {
{ 0.0000, 0.0040, 0.0080, 0.0120, 0.0160, 0.0199, 0.0239, 0.0279, 0.0319, 0.0359 },
{ 0.0398, 0.0438, 0.0478, 0.0517, 0.0557, 0.0596, 0.0636, 0.0675, 0.0714, 0.0753 },
{ 0.0793, 0.0832, 0.0871, 0.0910, 0.0948, 0.0987, 0.1026, 0.1064, 0.1103, 0.1141 },
{ 0.1179, 0.1217, 0.1255, 0.1293, 0.1331, 0.1368, 0.1406, 0.1443, 0.1480, 0.1517 },
{ 0.1554, 0.1591, 0.1628, 0.1664, 0.1700, 0.1736, 0.1772, 0.1808, 0.1844, 0.1879 },
{ 0.1915, 0.1950, 0.1985, 0.2019, 0.2054, 0.2088, 0.2123, 0.2157, 0.2190, 0.2224 },
{ 0.2257, 0.2291, 0.2324, 0.2357, 0.2389, 0.2422, 0.2454, 0.2486, 0.2517, 0.2549 },
{ 0.2580, 0.2611, 0.2642, 0.2673, 0.2704, 0.2734, 0.2764, 0.2794, 0.2823, 0.2852 },
{ 0.2881, 0.2910, 0.2939, 0.2967, 0.2995, 0.3023, 0.3051, 0.3078, 0.3106, 0.3133 },
{ 0.3159, 0.3186, 0.3212, 0.3238, 0.3264, 0.3289, 0.3315, 0.3340, 0.3365, 0.3389 },
{ 0.3413, 0.3438, 0.3461, 0.3485, 0.3508, 0.3531, 0.3554, 0.3577, 0.3599, 0.3621 },
{ 0.3643, 0.3665, 0.3686, 0.3708, 0.3729, 0.3749, 0.3770, 0.3790, 0.3810, 0.3830 },
{ 0.3849, 0.3869, 0.3888, 0.3907, 0.3925, 0.3944, 0.3962, 0.3980, 0.3997, 0.4015 },
{ 0.4032, 0.4049, 0.4066, 0.4082, 0.4099, 0.4115, 0.4131, 0.4147, 0.4162, 0.4177 },
{ 0.4192, 0.4207, 0.4222, 0.4236, 0.4251, 0.4265, 0.4279, 0.4292, 0.4306, 0.4319 },
{ 0.4332, 0.4345, 0.4357, 0.4370, 0.4382, 0.4394, 0.4406, 0.4418, 0.4429, 0.4441 },
{ 0.4452, 0.4463, 0.4474, 0.4484, 0.4495, 0.4505, 0.4515, 0.4525, 0.4535, 0.4545 },
{ 0.4554, 0.4564, 0.4573, 0.4582, 0.4591, 0.4599, 0.4608, 0.4616, 0.4625, 0.4633 },
{ 0.4641, 0.4649, 0.4656, 0.4664, 0.4671, 0.4678, 0.4686, 0.4693, 0.4699, 0.4706 },
{ 0.4713, 0.4719, 0.4726, 0.4732, 0.4738, 0.4744, 0.4750, 0.4756, 0.4761, 0.4767 },
{ 0.4772, 0.4778, 0.4783, 0.4788, 0.4793, 0.4798, 0.4803, 0.4808, 0.4812, 0.4817 },
{ 0.4821, 0.4826, 0.4830, 0.4834, 0.4838, 0.4842, 0.4846, 0.4850, 0.4854, 0.4857 },
{ 0.4861, 0.4864, 0.4868, 0.4871, 0.4875, 0.4878, 0.4881, 0.4884, 0.4887, 0.4890 },
{ 0.4893, 0.4896, 0.4898, 0.4901, 0.4904, 0.4906, 0.4909, 0.4911, 0.4913, 0.4916 },
{ 0.4918, 0.4920, 0.4922, 0.4925, 0.4927, 0.4929, 0.4931, 0.4932, 0.4934, 0.4936 },
{ 0.4938, 0.4940, 0.4941, 0.4943, 0.4945, 0.4946, 0.4948, 0.4949, 0.4951, 0.4952 },
{ 0.4953, 0.4955, 0.4956, 0.4957, 0.4959, 0.4960, 0.4961, 0.4962, 0.4963, 0.4964 },
{ 0.4965, 0.4966, 0.4967, 0.4968, 0.4969, 0.4970, 0.4971, 0.4972, 0.4973, 0.4974 },
{ 0.4974, 0.4975, 0.4976, 0.4977, 0.4977, 0.4978, 0.4979, 0.4979, 0.4980, 0.4981 },
{ 0.4981, 0.4982, 0.4982, 0.4983, 0.4984, 0.4984, 0.4985, 0.4985, 0.4986, 0.4986 },
{ 0.4987, 0.4987, 0.4987, 0.4988, 0.4988, 0.4989, 0.4989, 0.4989, 0.4990, 0.4990 },
{ 0.4990, 0.4991, 0.4991, 0.4991, 0.4992, 0.4992, 0.4992, 0.4992, 0.4993, 0.4993 },
{ 0.4993, 0.4993, 0.4994, 0.4994, 0.4994, 0.4994, 0.4994, 0.4995, 0.4995, 0.4995 },
{ 0.4995, 0.4995, 0.4995, 0.4996, 0.4996, 0.4996, 0.4996, 0.4996, 0.4996, 0.4997 },
{ 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4997, 0.4998 },
{ 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998, 0.4998 },
};
extern __device__ float row[36] = { 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9, 3.0, 3.1, 3.2, 3.3, 3.4, 3.5 };
extern __device__ float col[10] = { 0.00, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09 };
#ifdef __cplusplus
}
#endif
texture<int, 1, cudaReadModeElementType> count_ter;
texture<int, 1, cudaReadModeElementType> start_ter;
//texture<int, 1, cudaReadModeElementType> edges_ter;
//texture<int, 1, cudaReadModeElementType> sort_ter;
texture<float, 1, cudaReadModeElementType> k_m_ter;
typedef struct {
float mu;
float sigma;
}mu_sigma_struct;
__device__ float integrate1(float beta){
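	// pdf_m is a lookup table of Phi(x) - 0.5 for the standard normal CDF, with x = row + col
	// covering [0, 3.5] in 0.01 steps. integrate1 approximates Phi(beta) by locating the first
	// table entry at or above |beta|, averaging it with the previous entry where possible, and
	// then using the symmetry Phi(-x) = 1 - Phi(x) for negative beta.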
bool positive;
float b = abs(beta);
float result;
int flag = 0;
int i, j;
for (i = 0; i < 36; ++i){
for (j = 0; j < 10; ++j){
if (b >(row[i] + col[j]))
continue;
else{
flag = 1;
break;
}
}
if (flag)
break;
}
if ((i - 1) < 0 || (j - 1) < 0)
result = pdf_m[i][j];
else if (i > 35 || j > 35)
result = pdf_m[i - 1][j - 1];
else
result = 0.5*(pdf_m[i][j] + pdf_m[i - 1][j - 1]);
if (beta >= 0){
return result + 0.5;
}
else{
return 0.5 - result;
}
}
__device__ float integrate(float beta){
float step = 0.01f;
float down_idx = -5;
int up_idx = (int)(beta - down_idx) / step;
float micro = down_idx;
float sum = 0.0f;
float tmp;
for (int ii = 0; ii < up_idx; ii++){
tmp = pow(micro, 2) / 2;
sum += pow(2.718281828f, -tmp) * step;
micro += step;
}
return sum / sqrt(2 * 3.141592654f);
}
__device__ void sum_function(mu_sigma_struct max_strc, Gates_cu* currentG, float* k_para_matrix, int no_of_pc, \
int max_k_idx){
currentG->delay_mu = currentG->gate_mu + max_strc.mu;
int offset = 2 * no_of_pc;
int g_id_tmp = currentG->id;
float tmp = 0.0f;
float tmp2 = 0.0f;
float sigma = 0.0f;
for (int i = 0; i < offset; i++)
{
//tmp = tex1Dfetch(k_m_ter, (g_id_tmp * 2 * no_of_pc + i)) + tex1Dfetch(k_m_ter, (max_k_idx * 2 * no_of_pc + i));
tmp = k_para_matrix[g_id_tmp * 2 * no_of_pc + i] + k_para_matrix[max_k_idx * 2 * no_of_pc + i];
//tmp = l_k_m[idx * 40 + i] + l_k_m[idx * 40 + i];
sigma += pow(tmp, 2);
}
currentG->delay_sigma = sqrt(sigma);
}
__device__ void max_function(Gates_cu* gates_t, Gates_cu* currentG, int* edges_t, int no_of_pc, float* k_para_matrix, \
int bidx, int current_level){
int e_idx = edges_t[currentG->start_in];
float mu_1, mu_2, sigma_1, sigma_2;
int offset = 2 * no_of_pc;
int max_k_idx;
mu_sigma_struct max_strc;
int current_k_idx;
max_strc.mu = gates_t[e_idx].delay_mu;
max_strc.sigma = gates_t[e_idx].delay_sigma;
max_k_idx = gates_t[e_idx].id;
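	// Running statistical max over all fan-in arrival times. For each additional input, three
	// cases are handled below: (1) the 3-sigma ranges do not overlap, so the dominant arrival
	// wins outright; (2) the two arrivals are almost perfectly correlated with similar sigma,
	// so the one with the larger mean wins; (3) otherwise the max is approximated by moment
	// matching (step 3).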
for (int i = currentG->start_in + 1; i < currentG->start_in + currentG->no_of_in; i++){
e_idx = edges_t[i];
mu_1 = max_strc.mu;
sigma_1 = max_strc.sigma;
mu_2 = gates_t[e_idx].delay_mu;
sigma_2 = gates_t[e_idx].delay_sigma;
current_k_idx = gates_t[e_idx].id;
if (mu_1 - 3 * sigma_1 > mu_2 + 3 * sigma_2)
{
continue;
}
if (mu_1 + 3 * sigma_1 < mu_2 - 3 * sigma_2)
{
max_strc.mu = mu_2;
max_strc.sigma = sigma_2;
max_k_idx = current_k_idx;
continue;
}
//step 2
float co_variance = 0.0f;
float correlation = 0.0f;
for (int j = 0; j < offset; j++){
//co_variance += tex1Dfetch(k_m_ter, (max_k_idx * offset + j)) * tex1Dfetch(k_m_ter, (current_k_idx * offset + j));
co_variance += k_para_matrix[max_k_idx * offset + j] * k_para_matrix[current_k_idx * offset + j];
//co_variance += k_l[fake + j] * k_l[fake + j];
}
correlation = co_variance / (sigma_1 * sigma_2);
if (correlation > 0.99 && abs(sigma_1 - sigma_2) < 0.1){
if (mu_1 > mu_2){
continue;
}
else{
max_strc.mu = mu_2;
max_strc.sigma = sigma_2;
max_k_idx = current_k_idx;
continue;
}
}
//step 3
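		// Clark-style moment matching for max(X1, X2) of two correlated Gaussians:
		// alpha^2 = sigma1^2 + sigma2^2 - 2*cov, beta = (mu1 - mu2)/alpha, and mu_3/sigma_3
		// below are the first two moments of the max expressed via the normal pdf (phi) and
		// CDF (phi_intg).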
float alpha = sqrt(abs(pow(sigma_1, 2) + pow(sigma_2, 2) - 2 * co_variance));
float beta = (mu_1 - mu_2) / alpha;
float phi = pow(2.718281828f, -beta*beta / 2) / sqrt(2 * 3.141592654f);
float phi_intg = integrate1(beta);
float phi_intg_m = integrate1(-beta);
float sigma_3, mu_3;
mu_3 = mu_1 * phi_intg + mu_2 * phi_intg_m + alpha * phi;
float sigma_tmp = (pow(mu_1, 2) + pow(sigma_1, 2)) * phi_intg + (pow(mu_2, 2) + pow(sigma_2, 2)) * phi_intg_m + (mu_1 + mu_2) * alpha * phi - mu_3*mu_3;
sigma_3 = sqrt(abs(sigma_tmp));
/*
step 4
float S0 = 0.0f;
for (int j = 0; j < offset; j++){
float r_1 = tex2D(k_m_ter, max_k_idx, j);
float r_2 = tex2D(k_m_ter, current_k_idx, j);
float ar_tmp = (sigma_1*r_1*phi_intg + sigma_2*r_2*phi_intg_m) / sigma_3;
max_k[j] = ar_tmp;
S0 += ar_tmp * ar_tmp;
}
for (int j = 0; j < offset; j++){
max_k[j] = max_k[j] * sigma_3 / sqrt(abs(S0));
}
*/
max_strc.mu = mu_3;
max_strc.sigma = sigma_3;
}
sum_function(max_strc, currentG, k_para_matrix, no_of_pc, max_k_idx);
}
__global__ void PCA(Gates_cu* gates, int no_of_gates, int* sort, int* edges, float* k_param, \
int* l_count, int* l_start, int current_level, int no_of_pc, int max_level){
Gates_cu tmp;
//current_level=0;
int i = threadIdx.x;// +blockIdx.x*blockDim.x;
int idx = threadIdx.x;
int b = blockIdx.x;
//while (current_level < max_level){
int count; int start;
count = l_count[current_level];
start = l_start[current_level];
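	// Each thread handles one gate of the current topological level: primary inputs simply
	// forward their own gate mu/sigma as arrival statistics, while internal gates take the
	// statistical max over their fan-in arrivals and then add their own gate delay inside
	// max_function/sum_function.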
if (i < count){
tmp = gates[start + i];
if (tmp.is_input)
{
tmp.delay_mu = tmp.gate_mu;
tmp.delay_sigma = tmp.gate_sigma;
}
else{
//--max--//
max_function(gates, &tmp, edges, no_of_pc, k_param, b, current_level); //no change supposed to be on gates[i]
//printf("%f\n", l_gates[idx].delay_sigma);
//--sum--//
//sum_function(max_strc, &l_gates[i], k_param, no_of_pc, gates, edges); // gates[i] param should changes
}
//l_gates[tmp.id%MAX_TILE] = tmp;
gates[start + i] = tmp;
//test(gates, &l_gates[i], edges);
//gates[b][sort[start + i]] = l_gates[idx];
//gates[sort[start + i]] = tmp;
}
//current_level++;
//__syncthreads();
//}
}
__global__ void timing(Gates_cu* gates, int no_of_pc, float* eigen_v, float** eigen_vec, int x_grids, int no_of_gates)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
if (k < no_of_gates){
float sigma_of_delay = 0.0f;
gates[k].gate_mu = 0.0f; // This is to init mu
gates[k].gate_sigma = 0.0f; // This is to init sigma
gates[k].gate_mu = gates[k].delay;
int row = (int)floor((float)(gates[k].y / GRID_SIZE));
int column = (int)floor((float)(gates[k].x / GRID_SIZE));
int i_of_j = row*x_grids + column;
float k_tmp = 0.0f;
for (int j = 0; j < no_of_pc; j++)
{
if (eigen_v[j] < 0){
eigen_v[j] = 0;
}
			//L = sqr eg_v * eg_vec * sigma, the dRdL is the constant for a specific size of gate, I'll update it
k_tmp = (0.15f / 3)* gates[k].delay * sqrt(eigen_v[j]) * eigen_vec[i_of_j][j];// *sigma_of_L;
sigma_of_delay += k_tmp * k_tmp;
//gates[k].k_param[j] = k_tmp;
}
for (int j = 0; j < no_of_pc; j++)
{
			//W = sqr eg_v * eg_vec * sigma, the dRdW is the constant for a specific size of gate, I'll update it
k_tmp = -(0.08f / 3)* gates[k].delay * sqrt(eigen_v[j]) * eigen_vec[i_of_j][j];// *sigma_of_W;
sigma_of_delay += k_tmp * k_tmp;
//gates[k].k_param[j + no_of_pc] = k_tmp;
}
/*----get gate[i]'s sigma----*/
gates[k].gate_sigma = sqrt(sigma_of_delay);
}
}
extern "C" Gates_cu* cuSSTA(Gates_cu* gates, int no_of_gates, int* sort, int* edges, int no_of_edges, int no_of_pc, float *eigen_values, \
float **eigen_vectors, int x_grid, float* k_param, int* l_count, int* l_start, int max_level, Gates* gates_t){
cudaError_t cudaStatus;
int* c_sort;
cudaMallocManaged(&c_sort, sizeof(int) * no_of_gates);
for (int i = 0; i < no_of_gates; ++i){
c_sort[i] = sort[i];
}
float* k_parameters;
cudaMallocManaged(&k_parameters, sizeof(float) * no_of_gates * no_of_pc * 2);
for (int i = 0; i < no_of_gates * no_of_pc * 2; ++i){
k_parameters[i] = k_param[i];
}
int* c_l_count;
cudaMallocManaged(&c_l_count, sizeof(int) * max_level);
for (int i = 0; i < max_level; ++i)
c_l_count[i] = l_count[i];
int* c_l_start;
cudaMallocManaged(&c_l_start, sizeof(int) * max_level);
for (int i = 0; i < max_level; ++i)
c_l_start[i] = l_start[i];
Gates_cu* gates_cu1;
Gates_cu** gates_cu;
Gates_cu* g_str;
if (CASES == 1){
cudaMallocManaged(&gates_cu1, no_of_gates*sizeof(Gates_cu));
cudaMallocManaged(&g_str, no_of_gates*sizeof(Gates_cu));
for (int j = 0; j < no_of_gates; ++j){
gates_cu1[j] = gates[j];
}
}
else{
cudaMallocManaged(&gates_cu, CASES * sizeof(Gates_cu*));
for (int i = 0; i < CASES; ++i){
cudaMallocManaged(&gates_cu[i], no_of_gates * sizeof(Gates_cu));
}
for (int i = 0; i < CASES; ++i){
for (int j = 0; j < no_of_gates; ++j){
gates_cu[i][j] = gates[j];
}
}
}
float* eigen_v;
float** eigen_vec;
int* edges_cu;
cudaMallocManaged(&edges_cu, no_of_edges * sizeof(int));
cudaMallocManaged(&eigen_v, no_of_pc * sizeof(float));
cudaMallocManaged(&eigen_vec, no_of_pc * sizeof(float*));
for (int i = 0; i < no_of_edges; ++i){
edges_cu[i] = edges[i];
}
for (int i = 0; i < no_of_pc; ++i){
eigen_v[i] = eigen_values[i];
}
for (int i = 0; i < no_of_pc; ++i){
cudaMallocManaged(&eigen_vec[i], no_of_pc * sizeof(float));
for (int j = 0; j < no_of_pc; ++j){
eigen_vec[i][j] = eigen_vectors[i][j];
}
}
for (int i = 0; i < no_of_pc; ++i){
assert(eigen_v[i] == eigen_values[i]);
for (int j = 0; j < no_of_pc; ++j)
assert(eigen_vectors[i][j] == eigen_vec[i][j]);
}
/////////////////////////////////////////////////////////////////////////////////////////
dim3 blockn = (no_of_gates - 1) / 1024 + 1;
dim3 threadn = 1024;
//cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
if (CASES == 1){
timing << < blockn, threadn >> > (gates_cu1, no_of_pc, eigen_v, eigen_vec, x_grid, no_of_gates);
cudaDeviceSynchronize();
}
else{
for (int i = 0; i < CASES; ++i){
timing << < blockn, threadn >> > (gates_cu[i], no_of_pc, eigen_v, eigen_vec, x_grid, no_of_gates);
cudaDeviceSynchronize();
}
}
float time_elapsed = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
int current_level = 0;
while (current_level < max_level){
PCA << <CASES, MAX_TILE >> > (gates_cu1, no_of_gates, c_sort, edges_cu, k_parameters, \
c_l_count, c_l_start, current_level, no_of_pc, max_level);
cudaDeviceSynchronize();
current_level++;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_elapsed, start, stop);
printf("GPU running:%f(ms)\n", time_elapsed);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "!!!! GPU program execution error in 2: cuda Error=%d,(%s)\n", cudaStatus, cudaGetErrorString(cudaStatus));
}
return gates_cu1;
}
|
51f5e0c861d0925571588850101a8fe94e24073e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/misc.h"
#include "octnet/gpu/gpu.h"
#include <cstdio>
#include <cstdlib>
#if defined(_OPENMP)
#include <omp.h>
#endif
__global__ void kernel_mask_by_label(ot_data_t* values, int n_leafs, const ot_data_t* labels, int mask_label, int feature_size) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
int label = labels[leaf_idx];
if(label == mask_label) {
for(int f = 0; f < feature_size; ++f) {
values[leaf_idx * feature_size + f] = 0;
}
}
}
}
extern "C"
void octree_mask_by_label_gpu(const octree* labels, int mask_label, bool check, octree* values) {
if(check && (labels->feature_size != 1 || !octree_equal_trees_gpu(labels, values))) {
printf("[ERROR] mask_by_label - tree structure of inputs do not match\n");
exit(-1);
}
hipLaunchKernelGGL(( kernel_mask_by_label), dim3(GET_BLOCKS(values->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0,
values->data, values->n_leafs, labels->data, mask_label, values->feature_size
);
CUDA_POST_KERNEL_CHECK;
}
__global__ void kernel_determine_gt_split(octree out, int n_leafs, const octree struc, const ot_data_t* gt, int dense_depth, int dense_height, int dense_width) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
int grid_idx = out.data[leaf_idx * out.feature_size];
const ot_tree_t* tree = octree_get_tree(&struc, grid_idx);
int data_idx = leaf_idx - struc.prefix_leafs[grid_idx];
int bit_idx = data_idx_to_bit_idx(tree, data_idx);
int n,d,h,w;
int depth = octree_ind_to_dense_ind(&struc, grid_idx, bit_idx, &n, &d,&h,&w);
int width = width_from_depth(depth);
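    // This leaf covers a width^3 block of the dense ground-truth volume; count how many of
    // those voxels are occupied so the leaf can be marked homogeneous (0) or mixed (1) below.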
int sum = 0;
for(int dd = 0; dd < width; ++dd) {
for(int hh = 0; hh < width; ++hh) {
for(int ww = 0; ww < width; ++ww) {
float val = gt[(((n * 1 + 0) * dense_depth + (d+dd)) * dense_height + (h+hh)) * dense_width + (w+ww)];
sum += round(val);
}
}
}
// printf("grid_idx=%d, sum=%d, (width=%d)\n", grid_idx, sum, width);
if(sum == 0 || sum == width*width*width) {
out.data[leaf_idx * out.feature_size] = 0;
}
else {
out.data[leaf_idx * out.feature_size] = 1;
}
}
}
extern "C"
void octree_determine_gt_split_gpu(const octree* struc, const ot_data_t* gt, octree* out) {
octree_resize_gpu(struc->n, struc->grid_depth, struc->grid_height, struc->grid_width, 1, struc->n_leafs, out);
octree_cpy_trees_gpu_gpu(struc, out);
octree_cpy_prefix_leafs_gpu_gpu(struc, out);
int dense_depth = struc->grid_depth * 8;
int dense_height = struc->grid_height * 8;
int dense_width = struc->grid_width * 8;
octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data);
hipLaunchKernelGGL(( kernel_determine_gt_split), dim3(GET_BLOCKS(out->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, out->n_leafs, *struc, gt, dense_depth, dense_height, dense_width
);
CUDA_POST_KERNEL_CHECK;
}
| 51f5e0c861d0925571588850101a8fe94e24073e.cu | // Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/misc.h"
#include "octnet/gpu/gpu.h"
#include <cstdio>
#include <cstdlib>
#if defined(_OPENMP)
#include <omp.h>
#endif
__global__ void kernel_mask_by_label(ot_data_t* values, int n_leafs, const ot_data_t* labels, int mask_label, int feature_size) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
int label = labels[leaf_idx];
if(label == mask_label) {
for(int f = 0; f < feature_size; ++f) {
values[leaf_idx * feature_size + f] = 0;
}
}
}
}
extern "C"
void octree_mask_by_label_gpu(const octree* labels, int mask_label, bool check, octree* values) {
if(check && (labels->feature_size != 1 || !octree_equal_trees_gpu(labels, values))) {
printf("[ERROR] mask_by_label - tree structure of inputs do not match\n");
exit(-1);
}
kernel_mask_by_label<<<GET_BLOCKS(values->n_leafs), CUDA_NUM_THREADS>>>(
values->data, values->n_leafs, labels->data, mask_label, values->feature_size
);
CUDA_POST_KERNEL_CHECK;
}
__global__ void kernel_determine_gt_split(octree out, int n_leafs, const octree struc, const ot_data_t* gt, int dense_depth, int dense_height, int dense_width) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
int grid_idx = out.data[leaf_idx * out.feature_size];
const ot_tree_t* tree = octree_get_tree(&struc, grid_idx);
int data_idx = leaf_idx - struc.prefix_leafs[grid_idx];
int bit_idx = data_idx_to_bit_idx(tree, data_idx);
int n,d,h,w;
int depth = octree_ind_to_dense_ind(&struc, grid_idx, bit_idx, &n, &d,&h,&w);
int width = width_from_depth(depth);
int sum = 0;
for(int dd = 0; dd < width; ++dd) {
for(int hh = 0; hh < width; ++hh) {
for(int ww = 0; ww < width; ++ww) {
float val = gt[(((n * 1 + 0) * dense_depth + (d+dd)) * dense_height + (h+hh)) * dense_width + (w+ww)];
sum += round(val);
}
}
}
// printf("grid_idx=%d, sum=%d, (width=%d)\n", grid_idx, sum, width);
if(sum == 0 || sum == width*width*width) {
out.data[leaf_idx * out.feature_size] = 0;
}
else {
out.data[leaf_idx * out.feature_size] = 1;
}
}
}
extern "C"
void octree_determine_gt_split_gpu(const octree* struc, const ot_data_t* gt, octree* out) {
octree_resize_gpu(struc->n, struc->grid_depth, struc->grid_height, struc->grid_width, 1, struc->n_leafs, out);
octree_cpy_trees_gpu_gpu(struc, out);
octree_cpy_prefix_leafs_gpu_gpu(struc, out);
int dense_depth = struc->grid_depth * 8;
int dense_height = struc->grid_height * 8;
int dense_width = struc->grid_width * 8;
octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data);
kernel_determine_gt_split<<<GET_BLOCKS(out->n_leafs), CUDA_NUM_THREADS>>>(
*out, out->n_leafs, *struc, gt, dense_depth, dense_height, dense_width
);
CUDA_POST_KERNEL_CHECK;
}
|
ef6b478cdaa7f76b61a849dee21b72243f483b50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_mix20_1(uint8_t * ip, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, int32_t mixres, int32_t m2, int32_t mixbits)
{
int z = threadIdx.x + blockIdx.x * blockDim.x;
if (z < numSamples)
{
int32_t l, r;
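		// Each channel sample occupies 3 bytes addressed via the HBYTE/MBYTE/LBYTE byte-order
		// macros (assumed to come from includes.h). (x << 8) >> 12 sign-extends the packed
		// 24-bit value and drops the low 4 bits, leaving a 20-bit sample; u receives the
		// weighted mid (mixed) channel and v the side (difference) channel.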
ip += 3 * z;
ip += (stride - 1) * 3 * z;
l = (int32_t)(((uint32_t)ip[HBYTE] << 16) | ((uint32_t)ip[MBYTE] << 8) | (uint32_t)ip[LBYTE]);
l = (l << 8) >> 12;
ip += 3;
r = (int32_t)(((uint32_t)ip[HBYTE] << 16) | ((uint32_t)ip[MBYTE] << 8) | (uint32_t)ip[LBYTE]);
r = (r << 8) >> 12;
u[z] = (mixres * l + m2 * r) >> mixbits;
v[z] = l - r;
}
} | ef6b478cdaa7f76b61a849dee21b72243f483b50.cu | #include "includes.h"
__global__ void gpu_mix20_1(uint8_t * ip, uint32_t stride, int32_t * u, int32_t * v, int32_t numSamples, int32_t mixres, int32_t m2, int32_t mixbits)
{
int z = threadIdx.x + blockIdx.x * blockDim.x;
if (z < numSamples)
{
int32_t l, r;
ip += 3 * z;
ip += (stride - 1) * 3 * z;
l = (int32_t)(((uint32_t)ip[HBYTE] << 16) | ((uint32_t)ip[MBYTE] << 8) | (uint32_t)ip[LBYTE]);
l = (l << 8) >> 12;
ip += 3;
r = (int32_t)(((uint32_t)ip[HBYTE] << 16) | ((uint32_t)ip[MBYTE] << 8) | (uint32_t)ip[LBYTE]);
r = (r << 8) >> 12;
u[z] = (mixres * l + m2 * r) >> mixbits;
v[z] = l - r;
}
} |
2728d1501d16442641da8599a887bc3ba71a38c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(128) sgemm_nn_128x32
(
unsigned* param_Rand,
const float* param_A,
const float* param_B,
float* param_C,
int param_lda,
int param_ldb8,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
)
{
__shared__ float share[(128*16 + 32)*2 + 32*16*2 + 4];
int tid = threadIdx.x;
share[tid] = 1;
param_C[tid] = share[63-tid];
}
| 2728d1501d16442641da8599a887bc3ba71a38c8.cu | /*
* Copyright 2014 Nervana Systems Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C"
__global__ void __launch_bounds__(128) sgemm_nn_128x32
(
unsigned* param_Rand,
const float* param_A,
const float* param_B,
float* param_C,
int param_lda,
int param_ldb8,
int param_ldc,
int param_m,
int param_n,
int param_k,
float param_alpha,
float param_beta,
int param_flags,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
)
{
__shared__ float share[(128*16 + 32)*2 + 32*16*2 + 4];
int tid = threadIdx.x;
share[tid] = 1;
param_C[tid] = share[63-tid];
}
|
2d5b433cb775d48d57c352b647f5840fb12848dd.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <cmath>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include "mpi.h"
const static int nGPUperNode=2;
// GPU based autoencoder PCA code
using namespace std;
#include "nelmin.h"
// Define the sigmoidal function
__device__ __host__
inline float G(float x) { return( tanhf(x) ) ;}
__device__ __host__
inline double G(double x) { return( tanh(x) );}
// This is a convience class to hold all the examples and
// archtecture information. Most is boilerplate. CalcError
// is where all the work happens.
template<typename Real>
class ObjFunc {
private:
double objFuncCallTime;
unsigned int objFuncCallCount;
protected:
int nExamples;
thrust::device_vector<Real> d_data;
thrust::device_vector<Real> d_param;
public:
#include "CalcError.h"
ObjFunc() { nExamples = 0; objFuncCallCount=0; objFuncCallTime=0.;}
double aveObjFuncWallTime() { return(objFuncCallTime/objFuncCallCount); }
double totalObjFuncWallTime() { return(objFuncCallTime); }
int get_nExamples() {return(nExamples);}
void setExamples(thrust::host_vector<Real>& _h_data) {
nExamples = _h_data.size()/exLen;
// copy data to the device
int rank;
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
hipSetDevice(rank%nGPUperNode);
d_data = thrust::device_vector<Real>(nExamples*exLen);
thrust::copy(_h_data.begin(), _h_data.end(), d_data.begin());
d_param = thrust::device_vector<Real>(nParam);
}
Real objFunc(Real *p)
{
int rank,op;
Real sum=0.;
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
hipSetDevice(rank%nGPUperNode);
if(nExamples == 0) {
cerr << "data not set " << endl; exit(1);
}
CalcError getError(thrust::raw_pointer_cast(&d_data[0]),
thrust::raw_pointer_cast(&d_param[0]),
nInput, exLen);
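    // CalcError (from CalcError.h) is assumed to compute the squared reconstruction error of
    // one training example; transform_reduce below maps it over example indices on the GPU
    // and sums the results, and MPI_Reduce then combines the per-rank partial sums on rank 0.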
if(rank > 0) { // slave objective function
Real *param;
      hipHostMalloc(&param, sizeof(Real)*nParam,hipHostMallocPortable);
for(;;) { // loop until the master says I am done.
MPI_Bcast(&op, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(op==0) {
hipHostFree(param);
return(0);
}
if(sizeof(Real) == sizeof(float))
        MPI_Bcast(&param[0], nParam, MPI_FLOAT, 0, MPI_COMM_WORLD);
else
        MPI_Bcast(&param[0], nParam, MPI_DOUBLE, 0, MPI_COMM_WORLD);
thrust::copy(param, param+nParam, d_param.begin());
Real mySum = thrust::transform_reduce(
thrust::counting_iterator<unsigned int>(0),
thrust::counting_iterator<unsigned int>(nExamples),
getError,
(Real) 0.,
thrust::plus<Real>());
if(sizeof(Real) == sizeof(float))
MPI_Reduce(&mySum, &sum, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
else
MPI_Reduce(&mySum, &sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
}
} else { // master process
double startTime=omp_get_wtime();
op=1;
MPI_Bcast(&op, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(sizeof(Real) == sizeof(float))
MPI_Bcast(&p[0], nParam, MPI_FLOAT, 0, MPI_COMM_WORLD);
else
MPI_Bcast(&p[0], nParam, MPI_DOUBLE, 0, MPI_COMM_WORLD);
thrust::copy(p, p+nParam, d_param.begin());
Real mySum = thrust::transform_reduce(
thrust::counting_iterator<unsigned int>(0),
thrust::counting_iterator<unsigned int>(nExamples),
getError,
(Real) 0.,
thrust::plus<Real>());
if(sizeof(Real) == sizeof(float))
MPI_Reduce(&mySum, &sum, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
else
MPI_Reduce(&mySum, &sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
objFuncCallTime += (omp_get_wtime() - startTime);
objFuncCallCount++;
}
return(sum);
}
};
// Wrapper so the objective function can be called
// as a pointer to function for C-style libraries.
// Note: polymorphism allows easy use of
// either float or double types.
void* objFunc_object=NULL;
float func(float* param)
{
if(objFunc_object)
return ((ObjFunc<float>*) objFunc_object)->objFunc(param);
return(0.);
}
double func(double* param)
{
if(objFunc_object)
return ((ObjFunc<double>*) objFunc_object)->objFunc(param);
return(0.);
}
// get a uniform random number between -1 and 1
inline float f_rand() {
return 2*(rand()/((float)RAND_MAX)) -1.;
}
template <typename Real, int nInput>
void testNN( const Real *p, const Real *in, Real *out)
{
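  // Forward pass of the nInput-4-1-4-nInput autoencoder: weights and biases are read
  // sequentially from p (the same layout the objective function is presumably trained with),
  // with tanh (G) on the first and third hidden layers, a single linear bottleneck neuron
  // h2_0, and linear outputs.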
register int index=0;
register Real h2_0 = p[index++]; // bottleneck neuron
{
register Real h1_0 = p[index++];
register Real h1_1 = p[index++];
register Real h1_2 = p[index++];
register Real h1_3 = p[index++];
for(int i=0; i < nInput; i++) {
register Real input=in[i];
h1_0 += input * p[index++]; h1_1 += input * p[index++];
h1_2 += input * p[index++]; h1_3 += input * p[index++];
}
h1_0 = G(h1_0); h1_1 = G(h1_1);
h1_2 = G(h1_2); h1_3 = G(h1_3);
h2_0 += p[index++] * h1_0; h2_0 += p[index++] * h1_1;
h2_0 += p[index++] * h1_2; h2_0 += p[index++] * h1_3;
}
register Real h3_0 = p[index++];
register Real h3_1 = p[index++];
register Real h3_2 = p[index++];
register Real h3_3 = p[index++];
h3_0 += p[index++] * h2_0; h3_1 += p[index++] * h2_0;
h3_2 += p[index++] * h2_0; h3_3 += p[index++] * h2_0;
h3_0 = G(h3_0); h3_1 = G(h3_1);
h3_2 = G(h3_2); h3_3 = G(h3_3);
for(int i=0; i < nInput; i++) {
register Real o = p[index++];
o += h3_0 * p[index++]; o += h3_1 * p[index++];
o += h3_2 * p[index++]; o += h3_3 * p[index++];
out[i]=o;
}
}
template <typename Real>
void genData(thrust::host_vector<Real> &h_data, int nVec, Real xVar)
{
Real xMax = 1.1; Real xMin = -xMax;
Real xRange = (xMax - xMin);
for(int i=0; i < nVec; i++) {
Real t = xRange * f_rand();
Real z1 = t + xVar * f_rand();
Real z2 = t*t*t + xVar * f_rand();
h_data.push_back( z1 );
h_data.push_back( z2 );
}
}
#include <fstream>
template <typename Real>
void trainTest(char* filename, int rank, int numtasks)
{
ObjFunc<Real> testObj;
const int nParam = testObj.nParam;
cout << "nParam " << nParam << endl;
// read the test data
ifstream inFile (filename, ios::in | ios::binary);
// position 0 bytes from end
inFile.seekg(0, ios::end);
// determine the file size in bytes
ios::pos_type size = inFile.tellg();
// allocate number of Real values for this task
// (assumes a multiple of numtasks)
int nExamplesPerGPU = size/sizeof(Real)/testObj.exLen/numtasks;
thrust::host_vector<Real> h_data(nExamplesPerGPU*testObj.exLen);
// seek to the byte location in the file
inFile.seekg(rank*h_data.size()*sizeof(Real), ios::beg);
// read bytes from the file into h_data
inFile.read((char*)&h_data[0], h_data.size()*sizeof(Real));
// close the file
inFile.close();
testObj.setExamples(h_data);
int nExamples = testObj.get_nExamples();
if(rank > 0) {
testObj.objFunc( NULL );
return;
}
cout << "GB data " << (h_data.size()*sizeof(Real)/1e9) << endl;
// set the Nelder-Mead starting conditions
int icount, ifault, numres;
vector<Real> start(nParam);
vector<Real> step(nParam,1.);
vector<Real> xmin(nParam);
srand(0);
for(int i=0; i < start.size(); i++) start[i] = 0.2 * f_rand();
Real ynewlo = testObj.objFunc( &start[0] );
Real reqmin = 1.0E-18;
int konvge = 10;
int kcount = 500000; // set large for high-precision
objFunc_object = &testObj;
double optStartTime=omp_get_wtime();
nelmin<Real> (func, nParam, &start[0], &xmin[0], &ynewlo, reqmin, &step[0],
konvge, kcount, &icount, &numres, &ifault );
double optTime=omp_get_wtime()-optStartTime;
cout << endl <<" Return code IFAULT = " << ifault << endl << endl;
cout << " Estimate of minimizing value X*:" << endl << endl;
cout << " F(X*) = " << ynewlo << endl;
cout << " Number of iterations = " << icount << endl;
cout << " Number of restarts = " << numres << endl << endl;
cout << "Average wall time for ObjFunc "
<< testObj.aveObjFuncWallTime() << endl;
cout << "Total wall time in optimization method " << optTime << endl;
cout << "Percent time in objective function " <<
(100.*(testObj.totalObjFuncWallTime()/optTime)) << endl;
cout << " -- Generate scatter plot -- " << endl;
int index=0, nTest=100;
cout << "original input calculated" << endl;
thrust::host_vector<Real> h_test;
thrust::host_vector<Real> h_in(testObj.nInput);
thrust::host_vector<Real> h_out(testObj.nOutput);
genData<Real>(h_test, nTest, 0.0); // note: no variance for the test
for(int i=0; i< nTest; i++) {
h_in[0] = h_test[index++];
h_in[1] = h_test[index++];
testNN<Real,2>(&xmin[0],&h_in[0],&h_out[0]);
cout << h_data[testObj.nInput*i] << "," << h_data[testObj.nInput*i+1] << " "
<< h_in[0] << "," << h_in[1] << " "
<< h_out[0] << "," << h_out[1] << endl;
}
int op=0; // shutdown slave processes
MPI_Bcast(&op, 1, MPI_INT, 0, MPI_COMM_WORLD);
}
#include <stdio.h>
int main(int argc, char *argv[])
{
int numtasks, rank, ret;
if(argc < 2) {
fprintf(stderr,"Use: filename\n");
exit(1);
}
ret = MPI_Init(&argc,&argv);
if (ret != MPI_SUCCESS) {
printf ("Error in MPI_Init()!\n");
MPI_Abort(MPI_COMM_WORLD, ret);
}
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
printf ("Number of tasks= %d My rank= %d\n", numtasks,rank);
/******* do some work *******/
#ifdef USE_DBL
trainTest<double> ( argv[1], rank, numtasks );
#else
trainTest<float> ( argv[1], rank, numtasks);
#endif
MPI_Finalize();
return 0;
}
| 2d5b433cb775d48d57c352b647f5840fb12848dd.cu | #include <iostream>
#include <iomanip>
#include <cmath>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include "mpi.h"
const static int nGPUperNode=2;
// GPU based autoencoder PCA code
using namespace std;
#include "nelmin.h"
// Define the sigmoidal function
__device__ __host__
inline float G(float x) { return( tanhf(x) ) ;}
__device__ __host__
inline double G(double x) { return( tanh(x) );}
// This is a convience class to hold all the examples and
// archtecture information. Most is boilerplate. CalcError
// is where all the work happens.
template<typename Real>
class ObjFunc {
private:
double objFuncCallTime;
unsigned int objFuncCallCount;
protected:
int nExamples;
thrust::device_vector<Real> d_data;
thrust::device_vector<Real> d_param;
public:
#include "CalcError.h"
ObjFunc() { nExamples = 0; objFuncCallCount=0; objFuncCallTime=0.;}
double aveObjFuncWallTime() { return(objFuncCallTime/objFuncCallCount); }
double totalObjFuncWallTime() { return(objFuncCallTime); }
int get_nExamples() {return(nExamples);}
void setExamples(thrust::host_vector<Real>& _h_data) {
nExamples = _h_data.size()/exLen;
// copy data to the device
int rank;
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
cudaSetDevice(rank%nGPUperNode);
d_data = thrust::device_vector<Real>(nExamples*exLen);
thrust::copy(_h_data.begin(), _h_data.end(), d_data.begin());
d_param = thrust::device_vector<Real>(nParam);
}
Real objFunc(Real *p)
{
int rank,op;
Real sum=0.;
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
cudaSetDevice(rank%nGPUperNode);
if(nExamples == 0) {
cerr << "data not set " << endl; exit(1);
}
CalcError getError(thrust::raw_pointer_cast(&d_data[0]),
thrust::raw_pointer_cast(&d_param[0]),
nInput, exLen);
if(rank > 0) { // slave objective function
Real *param;
      cudaHostAlloc(&param, sizeof(Real)*nParam,cudaHostAllocPortable);
for(;;) { // loop until the master says I am done.
MPI_Bcast(&op, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(op==0) {
cudaFreeHost(param);
return(0);
}
if(sizeof(Real) == sizeof(float))
        MPI_Bcast(&param[0], nParam, MPI_FLOAT, 0, MPI_COMM_WORLD);
else
        MPI_Bcast(&param[0], nParam, MPI_DOUBLE, 0, MPI_COMM_WORLD);
thrust::copy(param, param+nParam, d_param.begin());
Real mySum = thrust::transform_reduce(
thrust::counting_iterator<unsigned int>(0),
thrust::counting_iterator<unsigned int>(nExamples),
getError,
(Real) 0.,
thrust::plus<Real>());
if(sizeof(Real) == sizeof(float))
MPI_Reduce(&mySum, &sum, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
else
MPI_Reduce(&mySum, &sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
}
} else { // master process
double startTime=omp_get_wtime();
op=1;
MPI_Bcast(&op, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(sizeof(Real) == sizeof(float))
MPI_Bcast(&p[0], nParam, MPI_FLOAT, 0, MPI_COMM_WORLD);
else
MPI_Bcast(&p[0], nParam, MPI_DOUBLE, 0, MPI_COMM_WORLD);
thrust::copy(p, p+nParam, d_param.begin());
Real mySum = thrust::transform_reduce(
thrust::counting_iterator<unsigned int>(0),
thrust::counting_iterator<unsigned int>(nExamples),
getError,
(Real) 0.,
thrust::plus<Real>());
if(sizeof(Real) == sizeof(float))
MPI_Reduce(&mySum, &sum, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD);
else
MPI_Reduce(&mySum, &sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
objFuncCallTime += (omp_get_wtime() - startTime);
objFuncCallCount++;
}
return(sum);
}
};
// Wrapper so the objective function can be called
// as a pointer to function for C-style libraries.
// Note: polymorphism allows easy use of
// either float or double types.
void* objFunc_object=NULL;
float func(float* param)
{
if(objFunc_object)
return ((ObjFunc<float>*) objFunc_object)->objFunc(param);
return(0.);
}
double func(double* param)
{
if(objFunc_object)
return ((ObjFunc<double>*) objFunc_object)->objFunc(param);
return(0.);
}
// get a uniform random number between -1 and 1
inline float f_rand() {
return 2*(rand()/((float)RAND_MAX)) -1.;
}
template <typename Real, int nInput>
void testNN( const Real *p, const Real *in, Real *out)
{
register int index=0;
register Real h2_0 = p[index++]; // bottleneck neuron
{
register Real h1_0 = p[index++];
register Real h1_1 = p[index++];
register Real h1_2 = p[index++];
register Real h1_3 = p[index++];
for(int i=0; i < nInput; i++) {
register Real input=in[i];
h1_0 += input * p[index++]; h1_1 += input * p[index++];
h1_2 += input * p[index++]; h1_3 += input * p[index++];
}
h1_0 = G(h1_0); h1_1 = G(h1_1);
h1_2 = G(h1_2); h1_3 = G(h1_3);
h2_0 += p[index++] * h1_0; h2_0 += p[index++] * h1_1;
h2_0 += p[index++] * h1_2; h2_0 += p[index++] * h1_3;
}
register Real h3_0 = p[index++];
register Real h3_1 = p[index++];
register Real h3_2 = p[index++];
register Real h3_3 = p[index++];
h3_0 += p[index++] * h2_0; h3_1 += p[index++] * h2_0;
h3_2 += p[index++] * h2_0; h3_3 += p[index++] * h2_0;
h3_0 = G(h3_0); h3_1 = G(h3_1);
h3_2 = G(h3_2); h3_3 = G(h3_3);
for(int i=0; i < nInput; i++) {
register Real o = p[index++];
o += h3_0 * p[index++]; o += h3_1 * p[index++];
o += h3_2 * p[index++]; o += h3_3 * p[index++];
out[i]=o;
}
}
template <typename Real>
void genData(thrust::host_vector<Real> &h_data, int nVec, Real xVar)
{
Real xMax = 1.1; Real xMin = -xMax;
Real xRange = (xMax - xMin);
for(int i=0; i < nVec; i++) {
Real t = xRange * f_rand();
Real z1 = t + xVar * f_rand();
Real z2 = t*t*t + xVar * f_rand();
h_data.push_back( z1 );
h_data.push_back( z2 );
}
}
#include <fstream>
template <typename Real>
void trainTest(char* filename, int rank, int numtasks)
{
ObjFunc<Real> testObj;
const int nParam = testObj.nParam;
cout << "nParam " << nParam << endl;
// read the test data
ifstream inFile (filename, ios::in | ios::binary);
// position 0 bytes from end
inFile.seekg(0, ios::end);
// determine the file size in bytes
ios::pos_type size = inFile.tellg();
// allocate number of Real values for this task
// (assumes a multiple of numtasks)
int nExamplesPerGPU = size/sizeof(Real)/testObj.exLen/numtasks;
thrust::host_vector<Real> h_data(nExamplesPerGPU*testObj.exLen);
// seek to the byte location in the file
inFile.seekg(rank*h_data.size()*sizeof(Real), ios::beg);
// read bytes from the file into h_data
inFile.read((char*)&h_data[0], h_data.size()*sizeof(Real));
// close the file
inFile.close();
testObj.setExamples(h_data);
int nExamples = testObj.get_nExamples();
if(rank > 0) {
testObj.objFunc( NULL );
return;
}
cout << "GB data " << (h_data.size()*sizeof(Real)/1e9) << endl;
// set the Nelder-Mead starting conditions
int icount, ifault, numres;
vector<Real> start(nParam);
vector<Real> step(nParam,1.);
vector<Real> xmin(nParam);
srand(0);
for(int i=0; i < start.size(); i++) start[i] = 0.2 * f_rand();
Real ynewlo = testObj.objFunc( &start[0] );
Real reqmin = 1.0E-18;
int konvge = 10;
int kcount = 500000; // set large for high-precision
objFunc_object = &testObj;
double optStartTime=omp_get_wtime();
nelmin<Real> (func, nParam, &start[0], &xmin[0], &ynewlo, reqmin, &step[0],
konvge, kcount, &icount, &numres, &ifault );
double optTime=omp_get_wtime()-optStartTime;
cout << endl <<" Return code IFAULT = " << ifault << endl << endl;
cout << " Estimate of minimizing value X*:" << endl << endl;
cout << " F(X*) = " << ynewlo << endl;
cout << " Number of iterations = " << icount << endl;
cout << " Number of restarts = " << numres << endl << endl;
cout << "Average wall time for ObjFunc "
<< testObj.aveObjFuncWallTime() << endl;
cout << "Total wall time in optimization method " << optTime << endl;
cout << "Percent time in objective function " <<
(100.*(testObj.totalObjFuncWallTime()/optTime)) << endl;
cout << " -- Generate scatter plot -- " << endl;
int index=0, nTest=100;
cout << "original input calculated" << endl;
thrust::host_vector<Real> h_test;
thrust::host_vector<Real> h_in(testObj.nInput);
thrust::host_vector<Real> h_out(testObj.nOutput);
genData<Real>(h_test, nTest, 0.0); // note: no variance for the test
for(int i=0; i< nTest; i++) {
h_in[0] = h_test[index++];
h_in[1] = h_test[index++];
testNN<Real,2>(&xmin[0],&h_in[0],&h_out[0]);
cout << h_data[testObj.nInput*i] << "," << h_data[testObj.nInput*i+1] << " "
<< h_in[0] << "," << h_in[1] << " "
<< h_out[0] << "," << h_out[1] << endl;
}
int op=0; // shutdown slave processes
MPI_Bcast(&op, 1, MPI_INT, 0, MPI_COMM_WORLD);
}
#include <stdio.h>
int main(int argc, char *argv[])
{
int numtasks, rank, ret;
if(argc < 2) {
fprintf(stderr,"Use: filename\n");
exit(1);
}
ret = MPI_Init(&argc,&argv);
if (ret != MPI_SUCCESS) {
printf ("Error in MPI_Init()!\n");
MPI_Abort(MPI_COMM_WORLD, ret);
}
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
printf ("Number of tasks= %d My rank= %d\n", numtasks,rank);
/******* do some work *******/
#ifdef USE_DBL
trainTest<double> ( argv[1], rank, numtasks );
#else
trainTest<float> ( argv[1], rank, numtasks);
#endif
MPI_Finalize();
return 0;
}
|
4efb04bf916c589a5367229c71837c80da2c7c95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <string>
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <face_quda.h>
#include <tune_quda.h>
#define PRESERVE_SPINOR_NORM
#ifdef PRESERVE_SPINOR_NORM // Preserve the norm regardless of basis
#define kP (1.0/sqrt(2.0))
#define kU (1.0/sqrt(2.0))
#else // More numerically accurate not to preserve the norm between basis
#define kP (0.5)
#define kU (1.0)
#endif
namespace quda {
using namespace colorspinor;
void exchangeExtendedGhost(cudaColorSpinorField* spinor, int R[], int parity, hipStream_t *stream_p)
{
#ifdef MULTI_GPU
int nFace = 0;
for(int i=0; i<4; i++){
if(R[i] > nFace) nFace = R[i];
}
int dagger = 0;
int gatherCompleted[2] = {0,0};
int commsCompleted[2] = {0,0};
hipEvent_t gatherEnd[2];
    for(int dir=0; dir<2; dir++) hipEventCreateWithFlags(&gatherEnd[dir], hipEventDisableTiming);
    for(int dim=3; dim>=0; dim--){
if(!commDim(dim)) continue;
spinor->packExtended(nFace, R, parity, dagger, dim, stream_p); // packing in the dim dimension complete
hipDeviceSynchronize(); // Need this since packing is performed in stream[Nstream-1]
      for(int dir=1; dir>=0; dir--){
spinor->gather(nFace, dagger, 2*dim + dir);
hipEventRecord(gatherEnd[dir], streams[2*dim+dir]); // gatherEnd[1], gatherEnd[0]
}
int completeSum = 0;
int dir = 1;
while(completeSum < 2){
if(!gatherCompleted[dir]){
if(hipSuccess == hipEventQuery(gatherEnd[dir])){
spinor->commsStart(nFace, 2*dim+dir, dagger);
completeSum++;
gatherCompleted[dir--] = 1;
}
}
}
gatherCompleted[0] = gatherCompleted[1] = 0;
// Query if comms has completed
dir = 1;
while(completeSum < 4){
if(!commsCompleted[dir]){
if(spinor->commsQuery(nFace, 2*dim+dir, dagger)){
spinor->scatterExtended(nFace, parity, dagger, 2*dim+dir);
completeSum++;
commsCompleted[dir--] = 1;
}
}
}
commsCompleted[0] = commsCompleted[1] = 0;
hipDeviceSynchronize(); // Wait for scatters to complete before next iteration
} // loop over dim
for(int dir=0; dir<2; dir++) hipEventDestroy(gatherEnd[dir]);
#endif
return;
}
/** Straight copy with no basis change */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
class PreserveBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
public:
__device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) {
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
for (int z=0; z<2; z++) {
out[(s*Nc+c)*2+z] = in[(s*Nc+c)*2+z];
}
}
}
}
};
  /** Transform from relativistic into non-relativistic basis */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
struct NonRelBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
__device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) {
int s1[4] = {1, 2, 3, 0};
int s2[4] = {3, 0, 1, 2};
RegTypeOut K1[4] = {static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(-kP),
static_cast<RegTypeOut>(-kP), static_cast<RegTypeOut>(-kP)};
RegTypeOut K2[4] = {static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(-kP),
static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(kP)};
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
for (int z=0; z<2; z++) {
out[(s*Nc+c)*2+z] = K1[s]*in[(s1[s]*Nc+c)*2+z] + K2[s]*in[(s2[s]*Nc+c)*2+z];
}
}
}
}
};
  /** Transform from non-relativistic into relativistic basis */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
struct RelBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
__device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) {
int s1[4] = {1, 2, 3, 0};
int s2[4] = {3, 0, 1, 2};
RegTypeOut K1[4] = {static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(kU),
static_cast<RegTypeOut>(kU), static_cast<RegTypeOut>(kU)};
RegTypeOut K2[4] = {static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(kU),
static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(-kU)};
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
for (int z=0; z<2; z++) {
out[(s*Nc+c)*2+z] = K1[s]*in[(s1[s]*Nc+c)*2+z] + K2[s]*in[(s2[s]*Nc+c)*2+z];
}
}
}
}
};
template<typename OutOrder, typename InOrder, typename Basis>
struct CopySpinorExArg{
OutOrder out;
const InOrder in;
Basis basis;
int E[QUDA_MAX_DIM];
int X[QUDA_MAX_DIM];
int length;
int parity;
CopySpinorExArg(const OutOrder &out, const InOrder &in, const Basis& basis, const int *E, const int *X, const int parity)
: out(out), in(in), basis(basis), parity(parity)
{
this->length = 1;
for(int d=0; d<4; d++){
this->E[d] = E[d];
this->X[d] = X[d];
this->length *= X[d]; // smaller volume
}
}
};
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
__device__ __host__ void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg, int X)
{
int x[4];
int R[4];
for(int d=0; d<4; d++) R[d] = (arg.E[d] - arg.X[d]) >> 1;
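    // R is the halo (border) radius in each dimension, half the difference between the
    // extended local volume E and the interior volume X; the lines below decode the
    // checkerboarded interior index X into the 4-d coordinates x[0..3].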
int za = X/(arg.X[0]/2);
int x0h = X - za*(arg.X[0]/2);
int zb = za/arg.X[1];
x[1] = za - zb*arg.X[1];
x[3] = zb / arg.X[2];
x[2] = zb - x[3]*arg.X[2];
x[0] = 2*x0h + ((x[1] + x[2] + x[3] + arg.parity) & 1);
// Y is the cb spatial index into the extended gauge field
int Y = ((((x[3]+R[3])*arg.E[2] + (x[2]+R[2]))*arg.E[1] + (x[1]+R[1]))*arg.E[0]+(x[0]+R[0])) >> 1;
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
RegTypeIn in[Ns*Nc*2];
RegTypeOut out[Ns*Nc*2];
if(extend){
arg.in.load(in, X);
arg.basis(out, in);
arg.out.save(out, Y);
}else{
arg.in.load(in, Y);
arg.basis(out,in);
arg.out.save(out, Y);
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
__global__ void copyInteriorKernel(CopySpinorExArg<OutOrder,InOrder,Basis> arg)
{
int cb_idx = blockIdx.x*blockDim.x + threadIdx.x;
while(cb_idx < arg.length){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg,cb_idx);
cb_idx += gridDim.x*blockDim.x;
}
}
/*
Host function
*/
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg)
{
for(int cb_idx=0; cb_idx<arg.length; cb_idx++){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg, cb_idx);
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
class CopySpinorEx : Tunable {
CopySpinorExArg<OutOrder,InOrder,Basis> arg;
const ColorSpinorField &meta;
QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
    unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
    bool advanceSharedBytes(TuneParam &param) const { return false; } // Don't tune shared mem
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.length; }
public:
CopySpinorEx(CopySpinorExArg<OutOrder,InOrder,Basis> &arg, const ColorSpinorField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("out_stride=%d,in_stride=%d",arg.out.stride,arg.in.stride);
}
virtual ~CopySpinorEx() {}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(location == QUDA_CPU_FIELD_LOCATION){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg);
}else if(location == QUDA_CUDA_FIELD_LOCATION){
hipLaunchKernelGGL(( copyInteriorKernel<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>)
, dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
long long flops() const { return 0; }
long long bytes() const {
return arg.length*2*Nc*Ns*(sizeof(FloatIn) + sizeof(FloatOut));
}
}; // CopySpinorEx
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis>
void copySpinorEx(OutOrder outOrder, const InOrder inOrder, const Basis basis, const int *E,
const int *X, const int parity, const bool extend, const ColorSpinorField &meta, QudaFieldLocation location)
{
CopySpinorExArg<OutOrder,InOrder,Basis> arg(outOrder, inOrder, basis, E, X, parity);
if(extend){
CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, true> copier(arg, meta, location);
copier.apply(0);
}else{
CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, false> copier(arg, meta, location);
copier.apply(0);
}
if(location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder>
void copySpinorEx(OutOrder outOrder, InOrder inOrder, const QudaGammaBasis outBasis, const QudaGammaBasis inBasis,
const int* E, const int* X, const int parity, const bool extend,
const ColorSpinorField &meta, QudaFieldLocation location)
{
if(inBasis == outBasis){
PreserveBasis<FloatOut,FloatIn,Ns,Nc> basis;
copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, PreserveBasis<FloatOut,FloatIn,Ns,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else if(outBasis == QUDA_UKQCD_GAMMA_BASIS && inBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){
if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns);
NonRelBasis<FloatOut,FloatIn,Ns,Nc> basis;
copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, NonRelBasis<FloatOut,FloatIn,Ns,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else if(inBasis == QUDA_UKQCD_GAMMA_BASIS && outBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){
if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns);
RelBasis<FloatOut,FloatIn,Ns,Nc> basis;
copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, RelBasis<FloatOut,FloatIn,Ns,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else{
errorQuda("Basis change not supported");
}
}
// Need to rewrite the following two functions...
// Decide on the output order
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename InOrder>
void extendedCopyColorSpinor(InOrder &inOrder, ColorSpinorField &out,
QudaGammaBasis inBasis, const int *E, const int *X, const int parity, const bool extend,
QudaFieldLocation location, FloatOut *Out, float *outNorm){
if(out.FieldOrder() == QUDA_FLOAT4_FIELD_ORDER){
FloatNOrder<FloatOut, Ns, Nc, 4> outOrder(out, Out, outNorm);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
}else if(out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER){
FloatNOrder<FloatOut, Ns, Nc, 2> outOrder(out, Out, outNorm);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
#if 0
}else if(out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER){
SpaceSpinorColorOrder<FloatOut, Ns, Nc> outOrder(out, Out);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
}else if(out.FieldOrder() == QUDA_SPACE_COLOR_SPIN_FIELD_ORDER){
SpaceColorSpinorOrder<FloatOut, Ns, Nc> outOrder(out, Out);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
} else if (out.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){
#ifdef BUILD_QDPJIT_INTERFACE
QDPJITDiracOrder<FloatOut, Ns, Nc> outOrder(out, Out);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
#endif
}else{
errorQuda("Order not defined");
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc>
void extendedCopyColorSpinor(ColorSpinorField &out, const ColorSpinorField &in,
const int parity, const QudaFieldLocation location, FloatOut *Out, FloatIn *In,
float* outNorm, float *inNorm){
int E[4];
int X[4];
const bool extend = (out.Volume() >= in.Volume());
if(extend){
for(int d=0; d<4; d++){
E[d] = out.X()[d];
X[d] = in.X()[d];
}
}else{
for(int d=0; d<4; d++){
E[d] = in.X()[d];
X[d] = out.X()[d];
}
}
X[0] *= 2; E[0] *= 2; // Since we consider only a single parity at a time
if(in.FieldOrder() == QUDA_FLOAT4_FIELD_ORDER){
FloatNOrder<FloatIn,Ns,Nc,4> inOrder(in, In, inNorm);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
}else if(in.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER){
FloatNOrder<FloatIn,Ns,Nc,2> inOrder(in, In, inNorm);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
#if 0
}else if(in.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER){
SpaceSpinorColorOrder<FloatIn,Ns,Nc> inOrder(in, In);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
}else if(in.FieldOrder() == QUDA_SPACE_COLOR_SPIN_FIELD_ORDER){
SpaceColorSpinorOrder<FloatIn,Ns,Nc> inOrder(in, In);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
}else if (in.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){
#ifdef BUILD_QDPJIT_INTERFACE
QDPJITDiracOrder<FloatIn,Ns,Nc> inOrder(in, In);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend,location, Out, outNorm);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
#endif
}else{
errorQuda("Order not defined");
}
}
template<int Ns, typename dstFloat, typename srcFloat>
void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src,
float *dstNorm, float *srcNorm) {
if(dst.Ndim() != src.Ndim())
errorQuda("Number of dimensions %d %d don't match", dst.Ndim(), src.Ndim());
if(!(dst.SiteOrder() == src.SiteOrder() ||
(dst.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER &&
src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER) ||
(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER &&
src.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER) ) ){
errorQuda("Subset orders %d %d don't match", dst.SiteOrder(), src.SiteOrder());
}
if(dst.SiteSubset() != src.SiteSubset())
errorQuda("Subset types do not match %d %d", dst.SiteSubset(), src.SiteSubset());
if(dst.Ncolor() != 3 || src.Ncolor() != 3) errorQuda("Nc != 3 not yet supported");
const int Nc = 3;
// We currently only support parity-ordered fields; even-odd or odd-even
if(dst.SiteOrder() == QUDA_LEXICOGRAPHIC_SITE_ORDER){
errorQuda("Copying to full fields with lexicographical ordering is not currently supported");
}
if(dst.SiteSubset() == QUDA_FULL_SITE_SUBSET){
if(src.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER ||
dst.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){
errorQuda("QDPJIT field ordering not supported for full site fields");
}
// set for the source subset ordering
srcFloat *srcEven = Src ? Src : (srcFloat*)src.V();
srcFloat* srcOdd = (srcFloat*)((char*)srcEven + src.Bytes()/2);
float *srcNormEven = srcNorm ? srcNorm : (float*)src.Norm();
float *srcNormOdd = (float*)((char*)srcNormEven + src.NormBytes()/2);
if(src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){
std::swap<srcFloat*>(srcEven, srcOdd);
std::swap<float*>(srcNormEven, srcNormOdd);
}
// set for the destination subset ordering
dstFloat *dstEven = Dst ? Dst : (dstFloat*)dst.V();
dstFloat *dstOdd = (dstFloat*)((char*)dstEven + dst.Bytes()/2);
float *dstNormEven = dstNorm ? dstNorm : (float*)dst.Norm();
float *dstNormOdd = (float*)((char*)dstNormEven + dst.NormBytes()/2);
if(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){
std::swap<dstFloat*>(dstEven, dstOdd);
std::swap<float*>(dstNormEven, dstNormOdd);
}
// should be able to apply to select either even or odd parity at this point as well.
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, 0, location, dstEven, srcEven, dstNormEven, srcNormEven);
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, 1, location, dstOdd, srcOdd, dstNormOdd, srcNormOdd);
}else{
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
} // N.B. Need to update this to account for differences in parity
}
template<typename dstFloat, typename srcFloat>
void CopyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src,
float *dstNorm=0, float *srcNorm=0)
{
if(dst.Nspin() != src.Nspin())
errorQuda("source and destination spins must match");
if(dst.Nspin() == 4){
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
copyExtendedColorSpinor<4>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
#else
errorQuda("Extended copy has not been built for Nspin=%d fields",dst.Nspin());
#endif
}else if(dst.Nspin() == 1){
#ifdef GPU_STAGGERED_DIRAC
copyExtendedColorSpinor<1>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
#else
errorQuda("Extended copy has not been built for Nspin=%d fields", dst.Nspin());
#endif
}else{
errorQuda("Nspin=%d unsupported", dst.Nspin());
}
}
// There's probably no need to have the additional Dst and Src arguments here!
void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
QudaFieldLocation location, const int parity, void *Dst, void *Src,
void *dstNorm, void *srcNorm){
if(dst.Precision() == QUDA_DOUBLE_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<double*>(Src));
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<float*>(Src));
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm));
} else {
errorQuda("Unsupported Precision %d", src.Precision());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<double*>(Src));
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<float*>(Src));
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm));
}else{
errorQuda("Unsupported Precision %d", src.Precision());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<double*>(Src), static_cast<float*>(dstNorm), 0);
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<float*>(Src), static_cast<float*>(dstNorm), 0);
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<short*>(Src), static_cast<float*>(dstNorm), static_cast<float*>(srcNorm));
}else{
errorQuda("Unsupported Precision %d", src.Precision());
}
}else{
errorQuda("Unsupported Precision %d", dst.Precision());
}
}
} // quda
| 4efb04bf916c589a5367229c71837c80da2c7c95.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <face_quda.h>
#include <tune_quda.h>
#define PRESERVE_SPINOR_NORM
#ifdef PRESERVE_SPINOR_NORM // Preserve the norm regardless of basis
#define kP (1.0/sqrt(2.0))
#define kU (1.0/sqrt(2.0))
#else // More numerically accurate not to preserve the norm between basis
#define kP (0.5)
#define kU (1.0)
#endif
namespace quda {
using namespace colorspinor;
void exchangeExtendedGhost(cudaColorSpinorField* spinor, int R[], int parity, cudaStream_t *stream_p)
{
#ifdef MULTI_GPU
int nFace = 0;
for(int i=0; i<4; i++){
if(R[i] > nFace) nFace = R[i];
}
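// nFace is the deepest border depth over all dimensions; it determines how many faces are packed and exchanged below.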
int dagger = 0;
int gatherCompleted[2] = {0,0};
int commsCompleted[2] = {0,0};
cudaEvent_t gatherEnd[2];
for(int dir=0; dir<2; dir++) cudaEventCreate(&gatherEnd[dir], cudaEventDisableTiming);
for(int dim=3; dim>=0; dim--){
if(!commDim(dim)) continue;
spinor->packExtended(nFace, R, parity, dagger, dim, stream_p); // packing in the dim dimension complete
cudaDeviceSynchronize(); // Need this since packing is performed in stream[Nstream-1]
for(int dir=1; dir>=0; dir--){
spinor->gather(nFace, dagger, 2*dim + dir);
cudaEventRecord(gatherEnd[dir], streams[2*dim+dir]); // gatherEnd[1], gatherEnd[0]
}
int completeSum = 0;
int dir = 1;
while(completeSum < 2){
if(!gatherCompleted[dir]){
if(cudaSuccess == cudaEventQuery(gatherEnd[dir])){
spinor->commsStart(nFace, 2*dim+dir, dagger);
completeSum++;
gatherCompleted[dir--] = 1;
}
}
}
gatherCompleted[0] = gatherCompleted[1] = 0;
// Query if comms has completed
dir = 1;
while(completeSum < 4){
if(!commsCompleted[dir]){
if(spinor->commsQuery(nFace, 2*dim+dir, dagger)){
spinor->scatterExtended(nFace, parity, dagger, 2*dim+dir);
completeSum++;
commsCompleted[dir--] = 1;
}
}
}
commsCompleted[0] = commsCompleted[1] = 0;
cudaDeviceSynchronize(); // Wait for scatters to complete before next iteration
} // loop over dim
for(int dir=0; dir<2; dir++) cudaEventDestroy(gatherEnd[dir]);
#endif
return;
}
/** Straight copy with no basis change */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
class PreserveBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
public:
__device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) {
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
for (int z=0; z<2; z++) {
out[(s*Nc+c)*2+z] = in[(s*Nc+c)*2+z];
}
}
}
}
};
/** Transform from relativistic into non-relavisitic basis */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
struct NonRelBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
__device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) {
int s1[4] = {1, 2, 3, 0};
int s2[4] = {3, 0, 1, 2};
RegTypeOut K1[4] = {static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(-kP),
static_cast<RegTypeOut>(-kP), static_cast<RegTypeOut>(-kP)};
RegTypeOut K2[4] = {static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(-kP),
static_cast<RegTypeOut>(kP), static_cast<RegTypeOut>(kP)};
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
for (int z=0; z<2; z++) {
out[(s*Nc+c)*2+z] = K1[s]*in[(s1[s]*Nc+c)*2+z] + K2[s]*in[(s2[s]*Nc+c)*2+z];
}
}
}
}
};
/** Transform from non-relativistic into relavisitic basis */
template <typename FloatOut, typename FloatIn, int Ns, int Nc>
struct RelBasis {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
__device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) {
int s1[4] = {1, 2, 3, 0};
int s2[4] = {3, 0, 1, 2};
RegTypeOut K1[4] = {static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(kU),
static_cast<RegTypeOut>(kU), static_cast<RegTypeOut>(kU)};
RegTypeOut K2[4] = {static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(kU),
static_cast<RegTypeOut>(-kU), static_cast<RegTypeOut>(-kU)};
for (int s=0; s<Ns; s++) {
for (int c=0; c<Nc; c++) {
for (int z=0; z<2; z++) {
out[(s*Nc+c)*2+z] = K1[s]*in[(s1[s]*Nc+c)*2+z] + K2[s]*in[(s2[s]*Nc+c)*2+z];
}
}
}
}
};
template<typename OutOrder, typename InOrder, typename Basis>
struct CopySpinorExArg{
OutOrder out;
const InOrder in;
Basis basis;
int E[QUDA_MAX_DIM];
int X[QUDA_MAX_DIM];
int length;
int parity;
CopySpinorExArg(const OutOrder &out, const InOrder &in, const Basis& basis, const int *E, const int *X, const int parity)
: out(out), in(in), basis(basis), parity(parity)
{
this->length = 1;
for(int d=0; d<4; d++){
this->E[d] = E[d];
this->X[d] = X[d];
this->length *= X[d]; // smaller volume
}
}
};
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
__device__ __host__ void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg, int X)
{
int x[4];
int R[4];
for(int d=0; d<4; d++) R[d] = (arg.E[d] - arg.X[d]) >> 1;
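// R[d] is the one-sided border width in dimension d; next recover the 4-d interior coordinate x[] from the checkerboard index X.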
int za = X/(arg.X[0]/2);
int x0h = X - za*(arg.X[0]/2);
int zb = za/arg.X[1];
x[1] = za - zb*arg.X[1];
x[3] = zb / arg.X[2];
x[2] = zb - x[3]*arg.X[2];
x[0] = 2*x0h + ((x[1] + x[2] + x[3] + arg.parity) & 1);
// Y is the cb spatial index into the extended spinor field
int Y = ((((x[3]+R[3])*arg.E[2] + (x[2]+R[2]))*arg.E[1] + (x[1]+R[1]))*arg.E[0]+(x[0]+R[0])) >> 1;
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
RegTypeIn in[Ns*Nc*2];
RegTypeOut out[Ns*Nc*2];
if(extend){
arg.in.load(in, X);
arg.basis(out, in);
arg.out.save(out, Y);
}else{
arg.in.load(in, Y);
arg.basis(out,in);
arg.out.save(out, Y);
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
__global__ void copyInteriorKernel(CopySpinorExArg<OutOrder,InOrder,Basis> arg)
{
int cb_idx = blockIdx.x*blockDim.x + threadIdx.x;
while(cb_idx < arg.length){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg,cb_idx);
cb_idx += gridDim.x*blockDim.x;
}
}
/*
Host function
*/
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg)
{
for(int cb_idx=0; cb_idx<arg.length; cb_idx++){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg, cb_idx);
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend>
class CopySpinorEx : Tunable {
CopySpinorExArg<OutOrder,InOrder,Basis> arg;
const ColorSpinorField &meta;
QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool advanceSharedBytes(TuneParam ¶m) const { return false; } // Don't tune shared mem
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.length; }
public:
CopySpinorEx(CopySpinorExArg<OutOrder,InOrder,Basis> &arg, const ColorSpinorField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("out_stride=%d,in_stride=%d",arg.out.stride,arg.in.stride);
}
virtual ~CopySpinorEx() {}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(location == QUDA_CPU_FIELD_LOCATION){
copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg);
}else if(location == QUDA_CUDA_FIELD_LOCATION){
copyInteriorKernel<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>
<<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
long long flops() const { return 0; }
long long bytes() const {
return arg.length*2*Nc*Ns*(sizeof(FloatIn) + sizeof(FloatOut));
}
}; // CopySpinorEx
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis>
void copySpinorEx(OutOrder outOrder, const InOrder inOrder, const Basis basis, const int *E,
const int *X, const int parity, const bool extend, const ColorSpinorField &meta, QudaFieldLocation location)
{
CopySpinorExArg<OutOrder,InOrder,Basis> arg(outOrder, inOrder, basis, E, X, parity);
if(extend){
CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, true> copier(arg, meta, location);
copier.apply(0);
}else{
CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, false> copier(arg, meta, location);
copier.apply(0);
}
if(location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder>
void copySpinorEx(OutOrder outOrder, InOrder inOrder, const QudaGammaBasis outBasis, const QudaGammaBasis inBasis,
const int* E, const int* X, const int parity, const bool extend,
const ColorSpinorField &meta, QudaFieldLocation location)
{
if(inBasis == outBasis){
PreserveBasis<FloatOut,FloatIn,Ns,Nc> basis;
copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, PreserveBasis<FloatOut,FloatIn,Ns,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else if(outBasis == QUDA_UKQCD_GAMMA_BASIS && inBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){
if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns);
NonRelBasis<FloatOut,FloatIn,Ns,Nc> basis;
copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, NonRelBasis<FloatOut,FloatIn,Ns,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else if(inBasis == QUDA_UKQCD_GAMMA_BASIS && outBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){
if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns);
RelBasis<FloatOut,FloatIn,Ns,Nc> basis;
copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, RelBasis<FloatOut,FloatIn,Ns,Nc> >
(outOrder, inOrder, basis, E, X, parity, extend, meta, location);
}else{
errorQuda("Basis change not supported");
}
}
// Need to rewrite the following two functions...
// Decide on the output order
template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename InOrder>
void extendedCopyColorSpinor(InOrder &inOrder, ColorSpinorField &out,
QudaGammaBasis inBasis, const int *E, const int *X, const int parity, const bool extend,
QudaFieldLocation location, FloatOut *Out, float *outNorm){
if(out.FieldOrder() == QUDA_FLOAT4_FIELD_ORDER){
FloatNOrder<FloatOut, Ns, Nc, 4> outOrder(out, Out, outNorm);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
}else if(out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER){
FloatNOrder<FloatOut, Ns, Nc, 2> outOrder(out, Out, outNorm);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
#if 0
}else if(out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER){
SpaceSpinorColorOrder<FloatOut, Ns, Nc> outOrder(out, Out);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
}else if(out.FieldOrder() == QUDA_SPACE_COLOR_SPIN_FIELD_ORDER){
SpaceColorSpinorOrder<FloatOut, Ns, Nc> outOrder(out, Out);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
} else if (out.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){
#ifdef BUILD_QDPJIT_INTERFACE
QDPJITDiracOrder<FloatOut, Ns, Nc> outOrder(out, Out);
copySpinorEx<FloatOut,FloatIn,Ns,Nc>
(outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
#endif
}else{
errorQuda("Order not defined");
}
}
template<typename FloatOut, typename FloatIn, int Ns, int Nc>
void extendedCopyColorSpinor(ColorSpinorField &out, const ColorSpinorField &in,
const int parity, const QudaFieldLocation location, FloatOut *Out, FloatIn *In,
float* outNorm, float *inNorm){
int E[4];
int X[4];
const bool extend = (out.Volume() >= in.Volume());
if(extend){
for(int d=0; d<4; d++){
E[d] = out.X()[d];
X[d] = in.X()[d];
}
}else{
for(int d=0; d<4; d++){
E[d] = in.X()[d];
X[d] = out.X()[d];
}
}
X[0] *= 2; E[0] *= 2; // Since we consider only a single parity at a time
if(in.FieldOrder() == QUDA_FLOAT4_FIELD_ORDER){
FloatNOrder<FloatIn,Ns,Nc,4> inOrder(in, In, inNorm);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
}else if(in.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER){
FloatNOrder<FloatIn,Ns,Nc,2> inOrder(in, In, inNorm);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
#if 0
}else if(in.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER){
SpaceSpinorColorOrder<FloatIn,Ns,Nc> inOrder(in, In);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
}else if(in.FieldOrder() == QUDA_SPACE_COLOR_SPIN_FIELD_ORDER){
SpaceColorSpinorOrder<FloatIn,Ns,Nc> inOrder(in, In);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm);
}else if (in.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){
#ifdef BUILD_QDPJIT_INTERFACE
QDPJITDiracOrder<FloatIn,Ns,Nc> inOrder(in, In);
extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend,location, Out, outNorm);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
#endif
}else{
errorQuda("Order not defined");
}
}
template<int Ns, typename dstFloat, typename srcFloat>
void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src,
float *dstNorm, float *srcNorm) {
if(dst.Ndim() != src.Ndim())
errorQuda("Number of dimensions %d %d don't match", dst.Ndim(), src.Ndim());
if(!(dst.SiteOrder() == src.SiteOrder() ||
(dst.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER &&
src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER) ||
(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER &&
src.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER) ) ){
errorQuda("Subset orders %d %d don't match", dst.SiteOrder(), src.SiteOrder());
}
if(dst.SiteSubset() != src.SiteSubset())
errorQuda("Subset types do not match %d %d", dst.SiteSubset(), src.SiteSubset());
if(dst.Ncolor() != 3 || src.Ncolor() != 3) errorQuda("Nc != 3 not yet supported");
const int Nc = 3;
// We currently only support parity-ordered fields; even-odd or odd-even
if(dst.SiteOrder() == QUDA_LEXICOGRAPHIC_SITE_ORDER){
errorQuda("Copying to full fields with lexicographical ordering is not currently supported");
}
if(dst.SiteSubset() == QUDA_FULL_SITE_SUBSET){
if(src.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER ||
dst.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){
errorQuda("QDPJIT field ordering not supported for full site fields");
}
// set for the source subset ordering
srcFloat *srcEven = Src ? Src : (srcFloat*)src.V();
srcFloat* srcOdd = (srcFloat*)((char*)srcEven + src.Bytes()/2);
float *srcNormEven = srcNorm ? srcNorm : (float*)src.Norm();
float *srcNormOdd = (float*)((char*)srcNormEven + src.NormBytes()/2);
if(src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){
std::swap<srcFloat*>(srcEven, srcOdd);
std::swap<float*>(srcNormEven, srcNormOdd);
}
// set for the destination subset ordering
dstFloat *dstEven = Dst ? Dst : (dstFloat*)dst.V();
dstFloat *dstOdd = (dstFloat*)((char*)dstEven + dst.Bytes()/2);
float *dstNormEven = dstNorm ? dstNorm : (float*)dst.Norm();
float *dstNormOdd = (float*)((char*)dstNormEven + dst.NormBytes()/2);
if(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){
std::swap<dstFloat*>(dstEven, dstOdd);
std::swap<float*>(dstNormEven, dstNormOdd);
}
// should be able to apply to select either even or odd parity at this point as well.
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, 0, location, dstEven, srcEven, dstNormEven, srcNormEven);
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, 1, location, dstOdd, srcOdd, dstNormOdd, srcNormOdd);
}else{
extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc>
(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
} // N.B. Need to update this to account for differences in parity
}
template<typename dstFloat, typename srcFloat>
void CopyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src,
float *dstNorm=0, float *srcNorm=0)
{
if(dst.Nspin() != src.Nspin())
errorQuda("source and destination spins must match");
if(dst.Nspin() == 4){
#if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC)
copyExtendedColorSpinor<4>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
#else
errorQuda("Extended copy has not been built for Nspin=%d fields",dst.Nspin());
#endif
}else if(dst.Nspin() == 1){
#ifdef GPU_STAGGERED_DIRAC
copyExtendedColorSpinor<1>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm);
#else
errorQuda("Extended copy has not been built for Nspin=%d fields", dst.Nspin());
#endif
}else{
errorQuda("Nspin=%d unsupported", dst.Nspin());
}
}
// There's probably no need to have the additional Dst and Src arguments here!
void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src,
QudaFieldLocation location, const int parity, void *Dst, void *Src,
void *dstNorm, void *srcNorm){
if(dst.Precision() == QUDA_DOUBLE_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<double*>(Src));
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<float*>(Src));
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm));
} else {
errorQuda("Unsupported Precision %d", src.Precision());
}
} else if (dst.Precision() == QUDA_SINGLE_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<double*>(Src));
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<float*>(Src));
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm));
}else{
errorQuda("Unsupported Precision %d", src.Precision());
}
} else if (dst.Precision() == QUDA_HALF_PRECISION){
if(src.Precision() == QUDA_DOUBLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<double*>(Src), static_cast<float*>(dstNorm), 0);
}else if(src.Precision() == QUDA_SINGLE_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<float*>(Src), static_cast<float*>(dstNorm), 0);
}else if(src.Precision() == QUDA_HALF_PRECISION){
CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<short*>(Src), static_cast<float*>(dstNorm), static_cast<float*>(srcNorm));
}else{
errorQuda("Unsupported Precision %d", src.Precision());
}
}else{
errorQuda("Unsupported Precision %d", dst.Precision());
}
}
} // quda
|
3693fdd9cc8ce870435275c17a92ebe9249eb8e6.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2021 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "driver.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
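// Usage sketch (as in GPUHistMakerDevice below): Init(device, n_bins) once, Reset() each
// iteration, AllocateHistogram(nidx) before accumulating into GetNodeHistogram(nidx).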
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl const* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
dh::caching_device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
HistRounding<GradientSumT> histogram_rounding;
dh::PinnedMemory pinned;
common::Monitor monitor;
TreeEvaluator tree_evaluator;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
// Storing split categories for last node.
dh::caching_device_vector<uint32_t> node_categories;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl const* _page,
common::Span<FeatureType const> _feature_types,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
tree_evaluator(param, n_features, _device_id),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
batch_param(std::move(_batch_param)) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(device_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(device_id));
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(num_columns, info.feature_weigths.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(hipSetDevice(device_id));
tree_evaluator = TreeEvaluator(param, dmat->Info().num_col_, device_id);
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(hipMemcpyAsync(
d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair), hipMemcpyDeviceToDevice));
auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
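// histogram_rounding is the pre-rounding factor applied when gradients are accumulated into
// histogram bins, keeping the atomic floating-point sums reproducible between runs.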
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = RegTree::kRoot;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx)};
auto gain_calc = tree_evaluator.GetEvaluator<GPUTrainingParam>();
EvaluateSingleSplit(dh::ToSpan(splits_out), gain_calc, inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(hipMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
hipMemcpyDeviceToHost));
return result.front();
}
void EvaluateLeftRightSplits(
GPUExpandEntry candidate, int left_nidx, int right_nidx, const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
right_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{
left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx)};
auto d_splits_out = dh::ToSpan(splits_out);
EvaluateSplits(d_splits_out, tree_evaluator.GetEvaluator<GPUTrainingParam>(), left, right);
dh::TemporaryArray<GPUExpandEntry> entries(2);
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
auto d_entries = entries.data().get();
dh::LaunchN(2, [=] __device__(size_t idx) {
auto split = d_splits_out[idx];
auto nidx = idx == 0 ? left_nidx : right_nidx;
float base_weight = evaluator.CalcWeight(
nidx, gpu_param, GradStats{split.left_sum + split.right_sum});
float left_weight =
evaluator.CalcWeight(nidx, gpu_param, GradStats{split.left_sum});
float right_weight = evaluator.CalcWeight(
nidx, gpu_param, GradStats{split.right_sum});
d_entries[idx] =
GPUExpandEntry{nidx, candidate.depth + 1, d_splits_out[idx],
base_weight, left_weight, right_weight};
});
dh::safe_cuda(hipMemcpyAsync(
pinned_candidates_out.data(), entries.data().get(),
sizeof(GPUExpandEntry) * entries.size(), hipMemcpyDeviceToHost));
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id),
feature_groups->DeviceAccessor(device_id), gpair,
d_ridx, d_node_hist, histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree* p_tree) {
RegTree::Node split_node = (*p_tree)[nidx];
auto split_type = p_tree->NodeSplitType(nidx);
auto d_matrix = page->GetDeviceAccessor(device_id);
auto node_cats = dh::ToSpan(node_categories);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
bst_node_t new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
bool go_left = true;
if (split_type == FeatureType::kCategorical) {
go_left = common::Decision(node_cats, common::AsCat(cut_value));
} else {
go_left = cut_value <= split_node.SplitCond();
}
if (go_left) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes),
dh::ToSpan(d_split_types), dh::ToSpan(d_categories),
dh::ToSpan(d_categories_segments));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes),
dh::ToSpan(d_split_types), dh::ToSpan(d_categories),
dh::ToSpan(d_categories_segments));
}
}
}
void FinalisePositionInPage(EllpackPageImpl const *page,
const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types,
common::Span<uint32_t const> categories,
common::Span<RegTree::Segment> categories_segments) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
// What happens if user prune the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats =
categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision(node_cats, common::AsCat(element));
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(VectorView<float> out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id));
CHECK_EQ(out_preds_d.DeviceIdx(), device_id);
auto d_ridx = row_partitioner->GetRows();
GPUTrainingParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
hipMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
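// Bump each sampled row's cached prediction by the learning-rate-scaled weight of the leaf it ended up in.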
dh::LaunchN(d_ridx.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = evaluator.CalcWeight(
pos, param_d, GradStats{d_node_sum_gradients[pos]});
static_assert(!std::is_const<decltype(out_preds_d)>::value, "");
auto v_predt = out_preds_d; // for some reason out_preds_d is const by both nvcc and clang.
v_predt[d_ridx[local_idx]] += weight * param_d.learning_rate;
});
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const GPUExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
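// Build the histogram for the child expected to hold fewer rows; the sibling is then
// obtained from the parent via the subtraction trick whenever possible.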
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
auto evaluator = tree_evaluator.GetEvaluator();
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto is_cat = candidate.split.is_cat;
if (is_cat) {
CHECK_LT(candidate.split.fvalue, std::numeric_limits<bst_cat_t>::max())
<< "Categorical feature value too large.";
auto cat = common::AsCat(candidate.split.fvalue);
common::CheckCat(cat);
std::vector<uint32_t> split_cats(LBitField32::ComputeStorageSize(std::max(cat+1, 1)), 0);
LBitField32 cats_bits(split_cats);
cats_bits.Set(cat);
dh::CopyToD(split_cats, &node_categories);
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats,
candidate.split.dir == kLeftDir, base_weight, left_weight,
right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(),
candidate.split.right_sum.GetHess());
} else {
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(),
candidate.split.right_sum.GetHess());
}
// Set up child constraints
auto left_child = tree[candidate.nid].LeftChild();
auto right_child = tree[candidate.nid].RightChild();
tree_evaluator.AddSplit(candidate.nid, left_child, right_child,
tree[candidate.nid].SplitIndex(), candidate.left_weight,
candidate.right_weight);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
GPUExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = dh::Reduce(
thrust::hip::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()),
GradientPair{}, thrust::plus<GradientPair>{});
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
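// root_sum now holds the globally reduced (grad, hess) pair across all workers.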
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
dh::TemporaryArray<GPUExpandEntry> entries(1);
auto d_entries = entries.data().get();
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
GPUTrainingParam gpu_param(param);
auto depth = p_tree->GetDepth(kRootNIdx);
dh::LaunchN(1, [=] __device__(size_t idx) {
float left_weight = evaluator.CalcWeight(kRootNIdx, gpu_param,
GradStats{split.left_sum});
float right_weight = evaluator.CalcWeight(
kRootNIdx, gpu_param, GradStats{split.right_sum});
d_entries[0] =
GPUExpandEntry(kRootNIdx, depth, split,
weight, left_weight, right_weight);
});
GPUExpandEntry root_entry;
dh::safe_cuda(hipMemcpyAsync(
&root_entry, entries.data().get(),
sizeof(GPUExpandEntry) * entries.size(), hipMemcpyDeviceToHost));
return root_entry;
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
Driver<GPUExpandEntry> driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy));
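// The driver keeps the frontier of expandable nodes ordered according to the configured
// grow policy (depth-wise or loss-guided).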
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
auto num_leaves = 1;
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
auto new_candidates =
pinned.GetSpan<GPUExpandEntry>(expand_set.size() * 2, GPUExpandEntry());
for (auto i = 0ull; i < expand_set.size(); i++) {
auto candidate = expand_set.at(i);
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (GPUExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
this->UpdatePosition(candidate.nid, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx, *p_tree,
new_candidates.subspan(i * 2, 2));
monitor.Stop("EvaluateSplits");
} else {
// Set default
new_candidates[i * 2] = GPUExpandEntry();
new_candidates[i * 2 + 1] = GPUExpandEntry();
}
}
dh::safe_cuda(hipDeviceSynchronize());
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.Stop("FinalisePosition");
}
};
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(device_));
info_->feature_types.SetDevice(device_);
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->feature_types.ConstDeviceSpan(),
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.Start("InitData");
this->InitData(p_fmat);
monitor_.Stop("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, VectorView<bst_float> p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
maker->UpdatePredictionCache(p_out_preds);
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed in args can be empty, if we simply purge the old maker without
// preserving parameters then we can't do Update on it.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(const DMatrix *data,
VectorView<bst_float> p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
| 3693fdd9cc8ce870435275c17a92ebe9249eb8e6.cu | /*!
* Copyright 2017-2021 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "driver.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
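// Usage sketch (illustrative): the device maker below owns one histogram pool per
// GPU and fetches per-node spans on demand, e.g.
//   DeviceHistogram<GradientPairPrecise> hist;
//   hist.Init(device_id, page->Cuts().TotalBins());
//   hist.AllocateHistogram(nidx);
//   auto node_hist = hist.GetNodeHistogram(nidx);  // Span over n_bins entries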
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl const* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
dh::caching_device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
HistRounding<GradientSumT> histogram_rounding;
dh::PinnedMemory pinned;
common::Monitor monitor;
TreeEvaluator tree_evaluator;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
// Storing split categories for last node.
dh::caching_device_vector<uint32_t> node_categories;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl const* _page,
common::Span<FeatureType const> _feature_types,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
tree_evaluator(param, n_features, _device_id),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
batch_param(std::move(_batch_param)) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(device_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(device_id));
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(num_columns, info.feature_weigths.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(device_id));
tree_evaluator = TreeEvaluator(param, dmat->Info().num_col_, device_id);
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(cudaMemcpyAsync(
d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair), cudaMemcpyDeviceToDevice));
auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = RegTree::kRoot;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx)};
auto gain_calc = tree_evaluator.GetEvaluator<GPUTrainingParam>();
EvaluateSingleSplit(dh::ToSpan(splits_out), gain_calc, inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(cudaMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
cudaMemcpyDeviceToHost));
return result.front();
}
void EvaluateLeftRightSplits(
GPUExpandEntry candidate, int left_nidx, int right_nidx, const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
right_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{
left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx)};
auto d_splits_out = dh::ToSpan(splits_out);
EvaluateSplits(d_splits_out, tree_evaluator.GetEvaluator<GPUTrainingParam>(), left, right);
dh::TemporaryArray<GPUExpandEntry> entries(2);
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
auto d_entries = entries.data().get();
dh::LaunchN(2, [=] __device__(size_t idx) {
auto split = d_splits_out[idx];
auto nidx = idx == 0 ? left_nidx : right_nidx;
float base_weight = evaluator.CalcWeight(
nidx, gpu_param, GradStats{split.left_sum + split.right_sum});
float left_weight =
evaluator.CalcWeight(nidx, gpu_param, GradStats{split.left_sum});
float right_weight = evaluator.CalcWeight(
nidx, gpu_param, GradStats{split.right_sum});
d_entries[idx] =
GPUExpandEntry{nidx, candidate.depth + 1, d_splits_out[idx],
base_weight, left_weight, right_weight};
});
dh::safe_cuda(cudaMemcpyAsync(
pinned_candidates_out.data(), entries.data().get(),
sizeof(GPUExpandEntry) * entries.size(), cudaMemcpyDeviceToHost));
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id),
feature_groups->DeviceAccessor(device_id), gpair,
d_ridx, d_node_hist, histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree* p_tree) {
RegTree::Node split_node = (*p_tree)[nidx];
auto split_type = p_tree->NodeSplitType(nidx);
auto d_matrix = page->GetDeviceAccessor(device_id);
auto node_cats = dh::ToSpan(node_categories);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
bst_node_t new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
bool go_left = true;
if (split_type == FeatureType::kCategorical) {
go_left = common::Decision(node_cats, common::AsCat(cut_value));
} else {
go_left = cut_value <= split_node.SplitCond();
}
if (go_left) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes),
dh::ToSpan(d_split_types), dh::ToSpan(d_categories),
dh::ToSpan(d_categories_segments));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes),
dh::ToSpan(d_split_types), dh::ToSpan(d_categories),
dh::ToSpan(d_categories_segments));
}
}
}
void FinalisePositionInPage(EllpackPageImpl const *page,
const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types,
common::Span<uint32_t const> categories,
common::Span<RegTree::Segment> categories_segments) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
// What happens if the user prunes the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats =
categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision(node_cats, common::AsCat(element));
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(VectorView<float> out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id));
CHECK_EQ(out_preds_d.DeviceIdx(), device_id);
auto d_ridx = row_partitioner->GetRows();
GPUTrainingParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
cudaMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
dh::LaunchN(d_ridx.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = evaluator.CalcWeight(
pos, param_d, GradStats{d_node_sum_gradients[pos]});
static_assert(!std::is_const<decltype(out_preds_d)>::value, "");
auto v_predt = out_preds_d; // for some reason out_preds_d is const by both nvcc and clang.
v_predt[d_ridx[local_idx]] += weight * param_d.learning_rate;
});
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
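// hist(parent) = hist(left child) + hist(right child), so only the child with the
// smaller Hessian sum (i.e. fewer effective rows) is built directly; the sibling is
// then recovered as parent - built_child via SubtractionTrick().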
void BuildHistLeftRight(const GPUExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
auto evaluator = tree_evaluator.GetEvaluator();
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto is_cat = candidate.split.is_cat;
if (is_cat) {
CHECK_LT(candidate.split.fvalue, std::numeric_limits<bst_cat_t>::max())
<< "Categorical feature value too large.";
auto cat = common::AsCat(candidate.split.fvalue);
common::CheckCat(cat);
std::vector<uint32_t> split_cats(LBitField32::ComputeStorageSize(std::max(cat+1, 1)), 0);
LBitField32 cats_bits(split_cats);
cats_bits.Set(cat);
dh::CopyToD(split_cats, &node_categories);
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats,
candidate.split.dir == kLeftDir, base_weight, left_weight,
right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(),
candidate.split.right_sum.GetHess());
} else {
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(),
candidate.split.right_sum.GetHess());
}
// Set up child constraints
auto left_child = tree[candidate.nid].LeftChild();
auto right_child = tree[candidate.nid].RightChild();
tree_evaluator.AddSplit(candidate.nid, left_child, right_child,
tree[candidate.nid].SplitIndex(), candidate.left_weight,
candidate.right_weight);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
GPUExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = dh::Reduce(
thrust::cuda::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()),
GradientPair{}, thrust::plus<GradientPair>{});
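// Sum the root (grad, hess) pair across all distributed workers before building
// the root histogram.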
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
dh::TemporaryArray<GPUExpandEntry> entries(1);
auto d_entries = entries.data().get();
auto evaluator = tree_evaluator.GetEvaluator<GPUTrainingParam>();
GPUTrainingParam gpu_param(param);
auto depth = p_tree->GetDepth(kRootNIdx);
dh::LaunchN(1, [=] __device__(size_t idx) {
float left_weight = evaluator.CalcWeight(kRootNIdx, gpu_param,
GradStats{split.left_sum});
float right_weight = evaluator.CalcWeight(
kRootNIdx, gpu_param, GradStats{split.right_sum});
d_entries[0] =
GPUExpandEntry(kRootNIdx, depth, split,
weight, left_weight, right_weight);
});
GPUExpandEntry root_entry;
dh::safe_cuda(cudaMemcpyAsync(
&root_entry, entries.data().get(),
sizeof(GPUExpandEntry) * entries.size(), cudaMemcpyDeviceToHost));
return root_entry;
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
Driver<GPUExpandEntry> driver(static_cast<TrainParam::TreeGrowPolicy>(param.grow_policy));
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
auto num_leaves = 1;
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
auto new_candidates =
pinned.GetSpan<GPUExpandEntry>(expand_set.size() * 2, GPUExpandEntry());
for (auto i = 0ull; i < expand_set.size(); i++) {
auto candidate = expand_set.at(i);
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (GPUExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.Start("UpdatePosition");
this->UpdatePosition(candidate.nid, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx, *p_tree,
new_candidates.subspan(i * 2, 2));
monitor.Stop("EvaluateSplits");
} else {
// Set default
new_candidates[i * 2] = GPUExpandEntry();
new_candidates[i * 2 + 1] = GPUExpandEntry();
}
}
dh::safe_cuda(cudaDeviceSynchronize());
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.Stop("FinalisePosition");
}
};
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(device_));
info_->feature_types.SetDevice(device_);
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->feature_types.ConstDeviceSpan(),
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.Start("InitData");
this->InitData(p_fmat);
monitor_.Stop("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, VectorView<bst_float> p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
maker->UpdatePredictionCache(p_out_preds);
monitor_.Stop("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed-in args can be empty; if we simply purge the old maker without
// preserving its parameters, then we can't call Update on it afterwards.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(const DMatrix *data,
VectorView<bst_float> p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
24555cc433a2181a2b639937eca7e54ecb6c8767.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,int var_4,float var_5,float* var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17) {
for (int i=0; i < var_1; ++i) {
if (comp > var_2 / (-1.6664E-27f * (+0.0f - var_3))) {
comp += (+1.8770E35f * var_5);
for (int i=0; i < var_4; ++i) {
var_6[i] = -1.6712E35f;
float tmp_1 = -1.8464E-36f;
comp = tmp_1 / var_6[i] + (var_7 + var_8);
}
if (comp > fmodf(+1.6640E-41f, +1.5258E-35f * logf(logf(-0.0f * (var_9 / (+0.0f * var_10 - -1.9280E-26f)))))) {
comp = -1.5239E18f - var_11;
comp += -0.0f / +1.4122E36f / var_12 / (var_13 + (var_14 * var_15));
float tmp_2 = -1.5238E-43f;
comp = tmp_2 / (-1.8835E-44f - var_16 - var_17);
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
int tmp_5 = atoi(argv[5]);
float tmp_6 = atof(argv[6]);
float* tmp_7 = initPointer( atof(argv[7]) );
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18);
hipDeviceSynchronize();
return 0;
}
| 24555cc433a2181a2b639937eca7e54ecb6c8767.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,int var_4,float var_5,float* var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17) {
for (int i=0; i < var_1; ++i) {
if (comp > var_2 / (-1.6664E-27f * (+0.0f - var_3))) {
comp += (+1.8770E35f * var_5);
for (int i=0; i < var_4; ++i) {
var_6[i] = -1.6712E35f;
float tmp_1 = -1.8464E-36f;
comp = tmp_1 / var_6[i] + (var_7 + var_8);
}
if (comp > fmodf(+1.6640E-41f, +1.5258E-35f * logf(logf(-0.0f * (var_9 / (+0.0f * var_10 - -1.9280E-26f)))))) {
comp = -1.5239E18f - var_11;
comp += -0.0f / +1.4122E36f / var_12 / (var_13 + (var_14 * var_15));
float tmp_2 = -1.5238E-43f;
comp = tmp_2 / (-1.8835E-44f - var_16 - var_17);
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
int tmp_5 = atoi(argv[5]);
float tmp_6 = atof(argv[6]);
float* tmp_7 = initPointer( atof(argv[7]) );
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18);
cudaDeviceSynchronize();
return 0;
}
|
83d440f4469c81dc0917ad68132f6a027cb2bad9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../Headers/DE.cuh"
void DE::runDE()
{
//set DE Variables
// const int NP = 80;
// const double F = 0.7;
// const double CR = 0.5;
int NP;
double F,CR;
if (methodName == "cir")
{
NP = 55;
CR = 0.6;
F = 0.5;
}
else
{
NP = 70;
CR = 0.85;
F = 0.6;
}
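// NP is the DE population size, F the differential weight applied to the mutant
// difference vectors, and CR the crossover rate; the per-method values above are
// presumably hand-tuned for the CIR and Vasicek calibrations respectively.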
// Generate the Best Possible r0
int rLog = ::floor(::log(crrntMonthMrktDataVec[0]));
double reducer = ::pow(10,rLog);
double r0 = crrntMonthMrktDataVec[0] - reducer;
// use vasicek or cir method
double cirFlag = 1.0;
if(methodName == "cir") cirFlag = std::sqrt(r0); // CIR scales the diffusion by sqrt(r); Vasicek keeps cirFlag at 1.0
const int tau = 9;
const int scenarioCount = 10000;
// Define General Variables
dim3 threadsPerBlock = 1024;
dim3 numBlocks = 1024;
// TODO: Define Generic Variables that later would be changeable
int mpCount = 3; // This is 3 because CIR and Vasicek both have 3 parameters
double up[3] = {0.25, 0.05, 0.005};
double lo[3] = {0.000001, 0.00001, 0.00001};
thrust::device_vector <double> upperBound(up, up + 3);
thrust::device_vector <double> lowerBound(lo, lo + 3);
double maturityArray[] = {0.25, 1, 3, 5, 7, 10, 15, 20, 30};
// Define Device Variables
thrust::device_vector < double> alphaFinal(NP);
thrust::device_vector < double> betaFinal(NP);
thrust::device_vector < double> sigmaFinal(NP);
thrust::device_vector < double> alphaNew(NP);
thrust::device_vector < double> betaNew(NP);
thrust::device_vector < double> sigmaNew(NP);
thrust::device_vector < int > mutIndx(NP * 3);
thrust::device_vector < double> mutRandVals(NP);
thrust::device_vector < double> nextRateRands(scenarioCount);
thrust::device_vector < double> deltaR(NP * scenarioCount);
thrust::device_vector < double> deltaR64(NP);
thrust::device_vector < double> rNext(NP);
thrust::device_vector < double> maturity(maturityArray, maturityArray + 9);
thrust::device_vector < double> errorFinal(NP);
thrust::device_vector < double> errorNew(NP);
thrust::device_vector < double> crrntMonthMdlData(tau);
thrust::device_vector < double> crrntMonthMrktData = crrntMonthMrktDataVec;
double errorAverage = 1.0;
double lastErrorAverage = 2.0;
double tol = 0.00000001;
gens = 1;
auto start = std::chrono::steady_clock::now();
// Initialize Curand and generate the random populations
hiprandState_t *dState;
hipMalloc(&dState, NP * mpCount * sizeof(hiprandState_t));
hipLaunchKernelGGL(( initializeCurand) , dim3(512),dim3(512) , 0, 0, dState , time(NULL));
hipDeviceSynchronize();
hipLaunchKernelGGL(( initializeNextRateRands) , dim3(512),dim3(512) , 0, 0, dState, nextRateRands);
hipLaunchKernelGGL(( initializePopulation) , dim3(16),dim3(16) , 0, 0, dState, alphaFinal, betaFinal, sigmaFinal,
NP, lowerBound, upperBound);
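// Each generation: evaluate the current population, build mutant/crossover
// candidates, evaluate those, and keep whichever of (original, candidate) has the
// lower error; iterate until the average error plateaus or 50 generations pass.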
while(std::abs(errorAverage -lastErrorAverage) > tol && gens < 50)
{
hipLaunchKernelGGL(( creatMutationIndexes) , dim3(16),dim3(16) , 0, 0, dState, NP, mutIndx, mutRandVals );
hipDeviceSynchronize();
// Reset The Fragile Vectors
thrust::fill( deltaR.begin(), deltaR.end(), 0.0);
thrust::fill( deltaR64.begin(), deltaR64.end(), 0.0);
thrust::fill( errorFinal.begin(), errorFinal.end(), 0.0);
hipLaunchKernelGGL(( evaluateVasicek) , dim3(1024),dim3(1024) , 0, 0, crrntMonthMdlData, crrntMonthMrktData,
alphaFinal, betaFinal, sigmaFinal, nextRateRands, NP, r0, deltaR, deltaR64,
rNext, maturity, errorFinal, cirFlag, dtTerm);
// check Tolerance
errorAverage = thrust::reduce(errorFinal.begin(), errorFinal.end()) / errorFinal.size();
std::cout << "average error for Generation " << gens << " is: "<< errorAverage << std::endl;
gens++;
lastErrorAverage = errorAverage;
hipDeviceSynchronize();
hipLaunchKernelGGL(( mutateAndCrossOver) , dim3(16),dim3(16) , 0, 0, NP, CR, F, mutIndx, mutRandVals, alphaNew, betaNew,
sigmaNew, alphaFinal, betaFinal, sigmaFinal, lowerBound, upperBound);
hipDeviceSynchronize();
// Reset The Fragile Vectors
thrust::fill( deltaR.begin(), deltaR.end(), 0.0);
thrust::fill( deltaR64.begin(), deltaR64.end(), 0.0);
thrust::fill( errorNew.begin(), errorNew.end(), 0.0);
hipLaunchKernelGGL(( evaluateVasicek) , dim3(1024),dim3(1024) , 0, 0, crrntMonthMdlData, crrntMonthMrktData,
alphaNew, betaNew, sigmaNew, nextRateRands, NP, r0, deltaR, deltaR64,
rNext, maturity, errorNew, cirFlag, dtTerm);
hipDeviceSynchronize();
hipLaunchKernelGGL(( selectMutatedOrOriginal) , dim3(16),dim3(16) , 0, 0, alphaFinal, betaFinal, sigmaFinal, alphaNew, betaNew,
sigmaNew, errorFinal, errorNew, NP);
hipDeviceSynchronize();
}
auto end = std::chrono::steady_clock::now();
std::chrono::duration<double> durationCount = end - start;
calTime = durationCount.count();
thrust::device_vector<double>::iterator iter = thrust::min_element(errorFinal.begin(), errorFinal.end());
unsigned int minErrorPosition = iter - errorFinal.begin();
avgError = errorFinal[minErrorPosition] ;
alpha = alphaFinal[minErrorPosition];
beta = betaFinal[minErrorPosition];
sigma = sigmaFinal[minErrorPosition];
std::cout << "Final Error: " << avgError << std::endl;
std::cout << "Final Alpha: " << alpha << std::endl;
std::cout << "Final Beta: " << beta << std::endl;
std::cout << "Final Sigma: " << sigma << std::endl;
std::cout << "Calculation Time: " << calTime << std::endl;
std::cout << "NewR: " << rNext[minErrorPosition] << std::endl;
for (size_t i = 0; i < 9; i++)
crrntMonthMdlDataArray[i] = getYield(maturityArray[i], alpha, beta, sigma, rNext[minErrorPosition], cirFlag);
// return rNext[minErrorPosition];
}
/****************************************************************************/
/******************** Setters and Getters are here **************************/
/****************************************************************************/
DE::DE(std::string m, const double dt)
{
methodName = m;
dtTerm = dt;
}
const double& DE::getAlpha() const { return alpha; }
const double& DE::getBeta() const { return beta; }
const double& DE::getSigma() const { return sigma; }
const double& DE::getError() const { return avgError; }
const int& DE::getIter() const { return gens; }
const double& DE::getTime() const { return calTime; }
const std::array<double, 9>& DE::getMdlArray() const
{
return crrntMonthMdlDataArray;
}
void DE::setMrktArray(std::vector<double> const& mrktData)
{
crrntMonthMrktDataVec = mrktData;
}
| 83d440f4469c81dc0917ad68132f6a027cb2bad9.cu | #include "../Headers/DE.cuh"
void DE::runDE()
{
//set DE Variables
// const int NP = 80;
// const double F = 0.7;
// const double CR = 0.5;
int NP;
double F,CR;
if (methodName == "cir")
{
NP = 55;
CR = 0.6;
F = 0.5;
}
else
{
NP = 70;
CR = 0.85;
F = 0.6;
}
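// NP is the DE population size, F the differential weight applied to the mutant
// difference vectors, and CR the crossover rate; the per-method values above are
// presumably hand-tuned for the CIR and Vasicek calibrations respectively.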
// Generate the Best Possible r0
int rLog = std::floor(std::log(crrntMonthMrktDataVec[0]));
double reducer = std::pow(10,rLog);
double r0 = crrntMonthMrktDataVec[0] - reducer;
// use vasicek or cir method
double cirFlag = 1.0;
if(methodName == "cir") cirFlag = std::sqrt(r0); // CIR scales the diffusion by sqrt(r); Vasicek keeps cirFlag at 1.0
const int tau = 9;
const int scenarioCount = 10000;
// Define General Variables
dim3 threadsPerBlock = 1024;
dim3 numBlocks = 1024;
// TODO: Define Generic Variables that later would be changeable
int mpCount = 3; // This is 3 because CIR and Vasicek both have 3 parameters
double up[3] = {0.25, 0.05, 0.005};
double lo[3] = {0.000001, 0.00001, 0.00001};
thrust::device_vector <double> upperBound(up, up + 3);
thrust::device_vector <double> lowerBound(lo, lo + 3);
double maturityArray[] = {0.25, 1, 3, 5, 7, 10, 15, 20, 30};
// Define Device Variables
thrust::device_vector < double> alphaFinal(NP);
thrust::device_vector < double> betaFinal(NP);
thrust::device_vector < double> sigmaFinal(NP);
thrust::device_vector < double> alphaNew(NP);
thrust::device_vector < double> betaNew(NP);
thrust::device_vector < double> sigmaNew(NP);
thrust::device_vector < int > mutIndx(NP * 3);
thrust::device_vector < double> mutRandVals(NP);
thrust::device_vector < double> nextRateRands(scenarioCount);
thrust::device_vector < double> deltaR(NP * scenarioCount);
thrust::device_vector < double> deltaR64(NP);
thrust::device_vector < double> rNext(NP);
thrust::device_vector < double> maturity(maturityArray, maturityArray + 9);
thrust::device_vector < double> errorFinal(NP);
thrust::device_vector < double> errorNew(NP);
thrust::device_vector < double> crrntMonthMdlData(tau);
thrust::device_vector < double> crrntMonthMrktData = crrntMonthMrktDataVec;
double errorAverage = 1.0;
double lastErrorAverage = 2.0;
double tol = 0.00000001;
gens = 1;
auto start = std::chrono::steady_clock::now();
// Initialize Curand and generate the random populations
curandState *dState;
cudaMalloc(&dState, NP * mpCount * sizeof(curandState));
initializeCurand <<< 512,512 >>> (dState , time(NULL));
cudaThreadSynchronize();
initializeNextRateRands <<< 512,512 >>> (dState, nextRateRands);
initializePopulation <<< 16,16 >>> (dState, alphaFinal, betaFinal, sigmaFinal,
NP, lowerBound, upperBound);
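// Each generation: evaluate the current population, build mutant/crossover
// candidates, evaluate those, and keep whichever of (original, candidate) has the
// lower error; iterate until the average error plateaus or 50 generations pass.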
while(std::abs(errorAverage -lastErrorAverage) > tol && gens < 50)
{
creatMutationIndexes <<< 16,16 >>> (dState, NP, mutIndx, mutRandVals );
cudaThreadSynchronize();
// Reset The Fragile Vectors
thrust::fill( deltaR.begin(), deltaR.end(), 0.0);
thrust::fill( deltaR64.begin(), deltaR64.end(), 0.0);
thrust::fill( errorFinal.begin(), errorFinal.end(), 0.0);
evaluateVasicek <<< 1024,1024 >>> (crrntMonthMdlData, crrntMonthMrktData,
alphaFinal, betaFinal, sigmaFinal, nextRateRands, NP, r0, deltaR, deltaR64,
rNext, maturity, errorFinal, cirFlag, dtTerm);
// check Tolerance
errorAverage = thrust::reduce(errorFinal.begin(), errorFinal.end()) / errorFinal.size();
std::cout << "average error for Generation " << gens << " is: "<< errorAverage << std::endl;
gens++;
lastErrorAverage = errorAverage;
cudaThreadSynchronize();
mutateAndCrossOver <<< 16,16 >>> (NP, CR, F, mutIndx, mutRandVals, alphaNew, betaNew,
sigmaNew, alphaFinal, betaFinal, sigmaFinal, lowerBound, upperBound);
cudaThreadSynchronize();
// Reset The Fragile Vectors
thrust::fill( deltaR.begin(), deltaR.end(), 0.0);
thrust::fill( deltaR64.begin(), deltaR64.end(), 0.0);
thrust::fill( errorNew.begin(), errorNew.end(), 0.0);
evaluateVasicek <<< 1024,1024 >>> (crrntMonthMdlData, crrntMonthMrktData,
alphaNew, betaNew, sigmaNew, nextRateRands, NP, r0, deltaR, deltaR64,
rNext, maturity, errorNew, cirFlag, dtTerm);
cudaThreadSynchronize();
selectMutatedOrOriginal <<< 16,16 >>> (alphaFinal, betaFinal, sigmaFinal, alphaNew, betaNew,
sigmaNew, errorFinal, errorNew, NP);
cudaThreadSynchronize();
}
auto end = std::chrono::steady_clock::now();
std::chrono::duration<double> durationCount = end - start;
calTime = durationCount.count();
thrust::device_vector<double>::iterator iter = thrust::min_element(errorFinal.begin(), errorFinal.end());
unsigned int minErrorPosition = iter - errorFinal.begin();
avgError = errorFinal[minErrorPosition] ;
alpha = alphaFinal[minErrorPosition];
beta = betaFinal[minErrorPosition];
sigma = sigmaFinal[minErrorPosition];
std::cout << "Final Error: " << avgError << std::endl;
std::cout << "Final Alpha: " << alpha << std::endl;
std::cout << "Final Beta: " << beta << std::endl;
std::cout << "Final Sigma: " << sigma << std::endl;
std::cout << "Calculation Time: " << calTime << std::endl;
std::cout << "NewR: " << rNext[minErrorPosition] << std::endl;
for (size_t i = 0; i < 9; i++)
crrntMonthMdlDataArray[i] = getYield(maturityArray[i], alpha, beta, sigma, rNext[minErrorPosition], cirFlag);
// return rNext[minErrorPosition];
}
/****************************************************************************/
/******************** Setters and Getters are here **************************/
/****************************************************************************/
DE::DE(std::string m, const double dt)
{
methodName = m;
dtTerm = dt;
}
const double& DE::getAlpha() const { return alpha; }
const double& DE::getBeta() const { return beta; }
const double& DE::getSigma() const { return sigma; }
const double& DE::getError() const { return avgError; }
const int& DE::getIter() const { return gens; }
const double& DE::getTime() const { return calTime; }
const std::array<double, 9>& DE::getMdlArray() const
{
return crrntMonthMdlDataArray;
}
void DE::setMrktArray(std::vector<double> const& mrktData)
{
crrntMonthMrktDataVec = mrktData;
}
|
6baf28b575f1d4d27bfe16562b532aef4c17594e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "logger.h"
#include "pluginKernels.h"
#include "pluginUtil.h"
#include "skipLayerNormPlugin.h"
#include <cassert>
#include <cstring>
#include <vector>
using bert::operator+;
namespace bert
{
template <typename T, unsigned TPB>
__global__ void skipLayerNormKernelSmall(
const int ld, const T* input, const T* skip, const float* beta, const float* gamma, T* output)
{
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld;
hipcub::Sum pairSum;
// reduce x and x^2
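// Each thread contributes the pair (x/ld, x*x/ld); summing these pairs across the
// block yields (mean, E[x^2]), from which layerNormSmall derives the variance.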
kvp<T> threadData(0, 0);
const int idx = offset + threadIdx.x;
T val = 0;
if (threadIdx.x < ld)
{
val = input[idx] + skip[idx];
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
layerNormSmall<T, TPB>(val, threadData, ld, idx, beta, gamma, output);
}
template <typename T, unsigned TPB>
__global__ void skipLayerNormKernel(
const int ld, const T* input, const T* skip, const float* beta, const float* gamma, T* output)
{
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld;
hipcub::Sum pairSum;
// reduce x and x^2
kvp<T> threadData(0, 0);
for (int i = threadIdx.x; i < ld; i += TPB)
{
const int idx = offset + i;
const T val = input[idx] + skip[idx];
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
output[idx] = val;
}
layerNorm<T, TPB>(threadData, ld, offset, beta, gamma, output);
}
template <typename T>
int computeSkipLayerNorm(hipStream_t stream, const int ld, const int n, const T* input, const T* skip,
const float* beta, const float* gamma, T* output)
{
// this must be true because n is the total size of the tensor
assert(n % ld == 0);
const int gridSize = n / ld;
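// One block normalises one row of length ld (the hidden size); the *KernelSmall
// variants assume blockSize >= ld so each thread touches at most one element.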
if (ld <= 32)
{
constexpr int blockSize = 32;
hipLaunchKernelGGL(( skipLayerNormKernelSmall<T, blockSize>)
, dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output);
}
else if (ld <= 128)
{
constexpr int blockSize = 128;
hipLaunchKernelGGL(( skipLayerNormKernelSmall<T, blockSize>)
, dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output);
}
else if (ld == 384)
{
constexpr int blockSize = 384;
hipLaunchKernelGGL(( skipLayerNormKernelSmall<T, blockSize>)
, dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output);
}
else
{
constexpr int blockSize = 256;
hipLaunchKernelGGL(( skipLayerNormKernel<T, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output);
}
CHECK(hipPeekAtLastError());
return 0;
}
using namespace nvinfer1;
// Clip plugin specific constants
namespace
{
static const char* SKIP_LAYER_NORM_VERSION{"1"};
static const char* SKIP_LAYER_NORM_NAME{"CustomSkipLayerNormPlugin"};
} // namespace
// Static class fields initialization
PluginFieldCollection SkipLayerNormPluginCreator::mFC{};
std::vector<PluginField> SkipLayerNormPluginCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(SkipLayerNormPluginCreator);
SkipLayerNormPlugin::SkipLayerNormPlugin(
const std::string name, const int ld, const Weights& beta, const Weights& gamma)
: mLayerName(name)
, mLd(ld)
, mGamma(gamma)
, mBeta(beta)
{
}
SkipLayerNormPlugin::SkipLayerNormPlugin(const std::string name, const void* data, size_t length)
: mLayerName(name)
{
gLogVerbose << "Skip LN Deser start\n";
// Deserialize in the same order as serialization
const char* d = static_cast<const char*>(data);
const char* a = d;
DESER(d, mType);
DESER(d, mLd);
DESER(d, mInputVolume);
mBetaDev = deserToDev<float>(d, mLd);
mGammaDev = deserToDev<float>(d, mLd);
assert(d == (a + length));
// this signals init not to allocate/copy
mGamma.count = mLd;
mGamma.values = nullptr;
mBeta.count = mLd;
mBeta.values = nullptr;
gLogVerbose << "Skip LN Deser done\n";
}
const char* SkipLayerNormPlugin::getPluginType() const
{
return SKIP_LAYER_NORM_NAME;
}
const char* SkipLayerNormPlugin::getPluginVersion() const
{
return SKIP_LAYER_NORM_VERSION;
}
int SkipLayerNormPlugin::getNbOutputs() const
{
return 1;
}
Dims SkipLayerNormPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
// Validate input arguments
assert(nbInputDims == 2);
assert(index == 0);
assert(inputs[0].nbDims == inputs[1].nbDims);
for (int d = 0; d < inputs[0].nbDims; d++)
{
assert(inputs[0].d[d] == inputs[1].d[d]);
}
return inputs[0];
}
int SkipLayerNormPlugin::initialize()
{
if (mGamma.values)
{
CHECK(hipMalloc(&mGammaDev, sizeof(float) * mGamma.count));
CHECK(hipMemcpy(mGammaDev, mGamma.values, sizeof(float) * mGamma.count, hipMemcpyHostToDevice));
}
if (mBeta.values)
{
CHECK(hipMalloc(&mBetaDev, sizeof(float) * mBeta.count));
CHECK(hipMemcpy(mBetaDev, mBeta.values, sizeof(float) * mGamma.count, hipMemcpyHostToDevice));
}
return 0;
}
int SkipLayerNormPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void*, hipStream_t stream)
{
int status = -1;
// Our plugin outputs only one tensor
// Launch CUDA kernel wrapper and save its return value
if (mType == DataType::kFLOAT)
{
const float* input = static_cast<const float*>(inputs[0]);
const float* skip = static_cast<const float*>(inputs[1]);
float* output = static_cast<float*>(outputs[0]);
status = computeSkipLayerNorm<float>(
stream, mLd, mInputVolume * batchSize, input, skip, mBetaDev, mGammaDev, output);
}
else if (mType == DataType::kHALF)
{
const half* input = static_cast<const half*>(inputs[0]);
const half* skip = static_cast<const half*>(inputs[1]);
half* output = static_cast<half*>(outputs[0]);
status = computeSkipLayerNorm<half>(
stream, mLd, mInputVolume * batchSize, input, skip, mBetaDev, mGammaDev, output);
}
else
{
assert(false);
}
return status;
}
size_t SkipLayerNormPlugin::getSerializationSize() const
{
return 2 * sizeof(float) * mLd + sizeof(DataType) + sizeof(mLd) + sizeof(mInputVolume);
}
void SkipLayerNormPlugin::serialize(void* buffer) const
{
char* d = static_cast<char*>(buffer);
const char* a = d;
writeToBuffer(d, mType);
writeToBuffer(d, mLd);
writeToBuffer(d, mInputVolume);
serFromDev(d, mBetaDev, mLd);
serFromDev(d, mGammaDev, mLd);
assert(d == a + getSerializationSize());
}
void SkipLayerNormPlugin::configureWithFormat(
const Dims* inputs, int nbInputs, const Dims* outputs, int nbOutputs, DataType type, PluginFormat format, int)
{
// Validate input arguments
assert(nbOutputs == 1);
assert(nbInputs == 2);
// Fetch volume for future enqueue() operations
size_t volume = 1;
for (int i = 0; i < inputs->nbDims; i++)
{
volume *= inputs->d[i];
}
mInputVolume = volume;
assert(inputs->nbDims == 4);
mLd = inputs->d[1]; // hiddensize
assert(inputs->d[2] == 1);
assert(inputs->d[3] == 1);
mType = type;
}
bool SkipLayerNormPlugin::supportsFormat(DataType type, PluginFormat format) const
{
// This plugin only supports FP32 and FP16, and NCHW input format
if (type == DataType::kFLOAT || type == DataType::kHALF)
{
return format == PluginFormat::kNCHW;
}
else
{
return false;
}
}
void SkipLayerNormPlugin::terminate()
{
gLogVerbose << "SKIPLN terminate start" << std::endl;
hipFree(mGammaDev);
hipFree(mBetaDev);
gLogVerbose << "SKIPLN terminate done" << std::endl;
}
void SkipLayerNormPlugin::destroy()
{
// This gets called when the network containing plugin is destroyed
delete this;
}
IPluginV2* SkipLayerNormPlugin::clone() const
{
return new SkipLayerNormPlugin(mLayerName, mLd, mBeta, mGamma);
}
void SkipLayerNormPlugin::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* SkipLayerNormPlugin::getPluginNamespace() const
{
return mNamespace.c_str();
}
SkipLayerNormPluginCreator::SkipLayerNormPluginCreator()
{
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* SkipLayerNormPluginCreator::getPluginName() const
{
return SKIP_LAYER_NORM_NAME;
}
const char* SkipLayerNormPluginCreator::getPluginVersion() const
{
return SKIP_LAYER_NORM_VERSION;
}
const PluginFieldCollection* SkipLayerNormPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2* SkipLayerNormPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
gLogVerbose << "Creating SkipLayerNormPluginCreator...\n";
int ld;
Weights beta;
Weights gamma;
for(int i=0; i< fc->nbFields; i++)
{
std::string field_name(fc->fields[i].name);
if (field_name.compare("ld")==0)
{
ld = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building ld: " << ld << std::endl;
}
if (field_name.compare("beta")==0)
{
gLogVerbose << "Building beta...\n";
beta.values = fc->fields[i].data;
beta.count = fc->fields[i].length;
beta.type = static_cast<DataType>(fc->fields[i].type);
}
if (field_name.compare("gamma")==0)
{
gLogVerbose << "Building gamma...\n";
gamma.values = fc->fields[i].data;
gamma.count = fc->fields[i].length;
gamma.type = static_cast<DataType>(fc->fields[i].type);
}
}
SkipLayerNormPlugin* p = new SkipLayerNormPlugin(name, ld, beta, gamma);
return p;
}
IPluginV2* SkipLayerNormPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call SkipLayerNormPlugin::destroy()
return new SkipLayerNormPlugin(name, serialData, serialLength);
}
void SkipLayerNormPluginCreator::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* SkipLayerNormPluginCreator::getPluginNamespace() const
{
return mNamespace.c_str();
}
}
| 6baf28b575f1d4d27bfe16562b532aef4c17594e.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "logger.h"
#include "pluginKernels.h"
#include "pluginUtil.h"
#include "skipLayerNormPlugin.h"
#include <cassert>
#include <cstring>
#include <vector>
using bert::operator+;
namespace bert
{
template <typename T, unsigned TPB>
__global__ void skipLayerNormKernelSmall(
const int ld, const T* input, const T* skip, const float* beta, const float* gamma, T* output)
{
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld;
cub::Sum pairSum;
// reduce x and x^2
kvp<T> threadData(0, 0);
const int idx = offset + threadIdx.x;
T val = 0;
if (threadIdx.x < ld)
{
val = input[idx] + skip[idx];
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
}
layerNormSmall<T, TPB>(val, threadData, ld, idx, beta, gamma, output);
}
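// General variant for larger hidden sizes: each thread strides over the hidden
// dimension in steps of TPB, stores the raw skip-added values to output, and
// layerNorm then normalizes them in place using the block-wide statistics.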
template <typename T, unsigned TPB>
__global__ void skipLayerNormKernel(
const int ld, const T* input, const T* skip, const float* beta, const float* gamma, T* output)
{
const T rld = T(1) / T(ld);
const int offset = blockIdx.x * ld;
cub::Sum pairSum;
// reduce x and x^2
kvp<T> threadData(0, 0);
for (int i = threadIdx.x; i < ld; i += TPB)
{
const int idx = offset + i;
const T val = input[idx] + skip[idx];
const T rldval = rld * val;
threadData = pairSum(threadData, kvp<T>(rldval, rldval * val));
output[idx] = val;
}
layerNorm<T, TPB>(threadData, ld, offset, beta, gamma, output);
}
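// Host-side launcher: one block per row of length ld (gridSize = n / ld), with
// the block size picked to match ld so the single-pass "small" kernel can be
// used for common hidden sizes; other sizes fall back to the strided kernel.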
template <typename T>
int computeSkipLayerNorm(cudaStream_t stream, const int ld, const int n, const T* input, const T* skip,
const float* beta, const float* gamma, T* output)
{
// this must be true because n is the total size of the tensor
assert(n % ld == 0);
const int gridSize = n / ld;
if (ld <= 32)
{
constexpr int blockSize = 32;
skipLayerNormKernelSmall<T, blockSize>
<<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output);
}
else if (ld <= 128)
{
constexpr int blockSize = 128;
skipLayerNormKernelSmall<T, blockSize>
<<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output);
}
else if (ld == 384)
{
constexpr int blockSize = 384;
skipLayerNormKernelSmall<T, blockSize>
<<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output);
}
else
{
constexpr int blockSize = 256;
skipLayerNormKernel<T, blockSize><<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output);
}
CHECK(cudaPeekAtLastError());
return 0;
}
using namespace nvinfer1;
// Clip plugin specific constants
namespace
{
static const char* SKIP_LAYER_NORM_VERSION{"1"};
static const char* SKIP_LAYER_NORM_NAME{"CustomSkipLayerNormPlugin"};
} // namespace
// Static class fields initialization
PluginFieldCollection SkipLayerNormPluginCreator::mFC{};
std::vector<PluginField> SkipLayerNormPluginCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(SkipLayerNormPluginCreator);
SkipLayerNormPlugin::SkipLayerNormPlugin(
const std::string name, const int ld, const Weights& beta, const Weights& gamma)
: mLayerName(name)
, mLd(ld)
, mGamma(gamma)
, mBeta(beta)
{
}
SkipLayerNormPlugin::SkipLayerNormPlugin(const std::string name, const void* data, size_t length)
: mLayerName(name)
{
gLogVerbose << "Skip LN Deser start\n";
// Deserialize in the same order as serialization
const char* d = static_cast<const char*>(data);
const char* a = d;
DESER(d, mType);
DESER(d, mLd);
DESER(d, mInputVolume);
mBetaDev = deserToDev<float>(d, mLd);
mGammaDev = deserToDev<float>(d, mLd);
assert(d == (a + length));
// this signals init not to allocate/copy
mGamma.count = mLd;
mGamma.values = nullptr;
mBeta.count = mLd;
mBeta.values = nullptr;
gLogVerbose << "Skip LN Deser done\n";
}
const char* SkipLayerNormPlugin::getPluginType() const
{
return SKIP_LAYER_NORM_NAME;
}
const char* SkipLayerNormPlugin::getPluginVersion() const
{
return SKIP_LAYER_NORM_VERSION;
}
int SkipLayerNormPlugin::getNbOutputs() const
{
return 1;
}
Dims SkipLayerNormPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
// Validate input arguments
assert(nbInputDims == 2);
assert(index == 0);
assert(inputs[0].nbDims == inputs[1].nbDims);
for (int d = 0; d < inputs[0].nbDims; d++)
{
assert(inputs[0].d[d] == inputs[1].d[d]);
}
return inputs[0];
}
int SkipLayerNormPlugin::initialize()
{
if (mGamma.values)
{
CHECK(cudaMalloc(&mGammaDev, sizeof(float) * mGamma.count));
CHECK(cudaMemcpy(mGammaDev, mGamma.values, sizeof(float) * mGamma.count, cudaMemcpyHostToDevice));
}
if (mBeta.values)
{
CHECK(cudaMalloc(&mBetaDev, sizeof(float) * mBeta.count));
        CHECK(cudaMemcpy(mBetaDev, mBeta.values, sizeof(float) * mBeta.count, cudaMemcpyHostToDevice));
}
return 0;
}
int SkipLayerNormPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void*, cudaStream_t stream)
{
int status = -1;
// Our plugin outputs only one tensor
// Launch CUDA kernel wrapper and save its return value
if (mType == DataType::kFLOAT)
{
const float* input = static_cast<const float*>(inputs[0]);
const float* skip = static_cast<const float*>(inputs[1]);
float* output = static_cast<float*>(outputs[0]);
status = computeSkipLayerNorm<float>(
stream, mLd, mInputVolume * batchSize, input, skip, mBetaDev, mGammaDev, output);
}
else if (mType == DataType::kHALF)
{
const half* input = static_cast<const half*>(inputs[0]);
const half* skip = static_cast<const half*>(inputs[1]);
half* output = static_cast<half*>(outputs[0]);
status = computeSkipLayerNorm<half>(
stream, mLd, mInputVolume * batchSize, input, skip, mBetaDev, mGammaDev, output);
}
else
{
assert(false);
}
return status;
}
size_t SkipLayerNormPlugin::getSerializationSize() const
{
return 2 * sizeof(float) * mLd + sizeof(DataType) + sizeof(mLd) + sizeof(mInputVolume);
}
void SkipLayerNormPlugin::serialize(void* buffer) const
{
char* d = static_cast<char*>(buffer);
const char* a = d;
writeToBuffer(d, mType);
writeToBuffer(d, mLd);
writeToBuffer(d, mInputVolume);
serFromDev(d, mBetaDev, mLd);
serFromDev(d, mGammaDev, mLd);
assert(d == a + getSerializationSize());
}
void SkipLayerNormPlugin::configureWithFormat(
const Dims* inputs, int nbInputs, const Dims* outputs, int nbOutputs, DataType type, PluginFormat format, int)
{
// Validate input arguments
assert(nbOutputs == 1);
assert(nbInputs == 2);
// Fetch volume for future enqueue() operations
size_t volume = 1;
for (int i = 0; i < inputs->nbDims; i++)
{
volume *= inputs->d[i];
}
mInputVolume = volume;
assert(inputs->nbDims == 4);
mLd = inputs->d[1]; // hiddensize
assert(inputs->d[2] == 1);
assert(inputs->d[3] == 1);
mType = type;
}
bool SkipLayerNormPlugin::supportsFormat(DataType type, PluginFormat format) const
{
// This plugin only supports ordinary floats, and NCHW input format
if (type == DataType::kFLOAT || type == DataType::kHALF)
{
return format == PluginFormat::kNCHW;
}
else
{
return false;
}
}
void SkipLayerNormPlugin::terminate()
{
gLogVerbose << "SKIPLN terminate start" << std::endl;
cudaFree(mGammaDev);
cudaFree(mBetaDev);
gLogVerbose << "SKIPLN terminate done" << std::endl;
}
void SkipLayerNormPlugin::destroy()
{
// This gets called when the network containing plugin is destroyed
delete this;
}
IPluginV2* SkipLayerNormPlugin::clone() const
{
return new SkipLayerNormPlugin(mLayerName, mLd, mBeta, mGamma);
}
void SkipLayerNormPlugin::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* SkipLayerNormPlugin::getPluginNamespace() const
{
return mNamespace.c_str();
}
SkipLayerNormPluginCreator::SkipLayerNormPluginCreator()
{
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* SkipLayerNormPluginCreator::getPluginName() const
{
return SKIP_LAYER_NORM_NAME;
}
const char* SkipLayerNormPluginCreator::getPluginVersion() const
{
return SKIP_LAYER_NORM_VERSION;
}
const PluginFieldCollection* SkipLayerNormPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2* SkipLayerNormPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
gLogVerbose << "Creating SkipLayerNormPluginCreator...\n";
int ld;
Weights beta;
Weights gamma;
for(int i=0; i< fc->nbFields; i++)
{
std::string field_name(fc->fields[i].name);
if (field_name.compare("ld")==0)
{
ld = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building ld: " << ld << std::endl;
}
if (field_name.compare("beta")==0)
{
gLogVerbose << "Building beta...\n";
beta.values = fc->fields[i].data;
beta.count = fc->fields[i].length;
beta.type = static_cast<DataType>(fc->fields[i].type);
}
if (field_name.compare("gamma")==0)
{
gLogVerbose << "Building gamma...\n";
gamma.values = fc->fields[i].data;
gamma.count = fc->fields[i].length;
gamma.type = static_cast<DataType>(fc->fields[i].type);
}
}
SkipLayerNormPlugin* p = new SkipLayerNormPlugin(name, ld, beta, gamma);
return p;
}
IPluginV2* SkipLayerNormPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call SkipLayerNormPlugin::destroy()
return new SkipLayerNormPlugin(name, serialData, serialLength);
}
void SkipLayerNormPluginCreator::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* SkipLayerNormPluginCreator::getPluginNamespace() const
{
return mNamespace.c_str();
}
}
|
ae80523aff98fcdada527750d256ecc76138fe56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const size_t N = 8ULL*1024ULL*1024ULL; // data size
const int BLOCK_SIZE = 256; // CUDA maximum is 1024
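// Block-level sum reduction: each block accumulates its grid-stride slice of
// gdata into shared memory, collapses it with a tree (sweep) reduction, and
// thread 0 writes one partial sum per block to out; main() launches this twice,
// first over the full array and then over the per-block partial sums.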
__global__ void reduce(float *gdata, float *out, size_t n){
__shared__ float sdata[BLOCK_SIZE];
int tid = threadIdx.x;
sdata[tid] = 0.0f;
size_t idx = threadIdx.x+blockDim.x*blockIdx.x;
while (idx < n) { // grid stride loop to load data
sdata[tid] += gdata[idx];
idx += gridDim.x*blockDim.x;
}
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
__syncthreads();
if (tid < s) // parallel sweep reduction
sdata[tid] += sdata[tid + s];
}
if (tid == 0) out[blockIdx.x] = sdata[0];
}
int main(){
float *h_A, *h_sum, *d_A, *d_sums;
const int blocks = 640;
h_A = new float[N]; // allocate space for data in host memory
h_sum = new float;
float max_val = 5.0f;
for (size_t i = 0; i < N; i++) // initialize matrix in host memory
h_A[i] = 1.0f;
h_A[100] = max_val;
hipMalloc(&d_A, N*sizeof(float)); // allocate device space for A
hipMalloc(&d_sums, blocks*sizeof(float)); // allocate device space for partial sums
cudaCheckErrors("hipMalloc failure"); // error checking
// copy matrix A to device:
hipMemcpy(d_A, h_A, N*sizeof(float), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy H2D failure");
//cuda processing sequence step 1 is complete
hipLaunchKernelGGL(( reduce), dim3(blocks), dim3(BLOCK_SIZE), 0, 0, d_A, d_sums, N); // reduce stage 1
cudaCheckErrors("reduction kernel launch failure");
hipLaunchKernelGGL(( reduce), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_sums, d_A, blocks); // reduce stage 2
cudaCheckErrors("reduction kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
hipMemcpy(h_sum, d_A, sizeof(float), hipMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("reduction w/atomic kernel execution failure or hipMemcpy D2H failure");
printf("reduction output: %f, expected sum reduction output: %f, expected max reduction output: %f\n", *h_sum, (float)((N-1)+max_val), max_val);
return 0;
}
| ae80523aff98fcdada527750d256ecc76138fe56.cu | #include <stdio.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const size_t N = 8ULL*1024ULL*1024ULL; // data size
const int BLOCK_SIZE = 256; // CUDA maximum is 1024
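// Block-level sum reduction: each block accumulates its grid-stride slice of
// gdata into shared memory, collapses it with a tree (sweep) reduction, and
// thread 0 writes one partial sum per block to out; main() launches this twice,
// first over the full array and then over the per-block partial sums.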
__global__ void reduce(float *gdata, float *out, size_t n){
__shared__ float sdata[BLOCK_SIZE];
int tid = threadIdx.x;
sdata[tid] = 0.0f;
size_t idx = threadIdx.x+blockDim.x*blockIdx.x;
while (idx < n) { // grid stride loop to load data
sdata[tid] += gdata[idx];
idx += gridDim.x*blockDim.x;
}
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
__syncthreads();
if (tid < s) // parallel sweep reduction
sdata[tid] += sdata[tid + s];
}
if (tid == 0) out[blockIdx.x] = sdata[0];
}
int main(){
float *h_A, *h_sum, *d_A, *d_sums;
const int blocks = 640;
h_A = new float[N]; // allocate space for data in host memory
h_sum = new float;
float max_val = 5.0f;
for (size_t i = 0; i < N; i++) // initialize matrix in host memory
h_A[i] = 1.0f;
h_A[100] = max_val;
cudaMalloc(&d_A, N*sizeof(float)); // allocate device space for A
cudaMalloc(&d_sums, blocks*sizeof(float)); // allocate device space for partial sums
cudaCheckErrors("cudaMalloc failure"); // error checking
// copy matrix A to device:
cudaMemcpy(d_A, h_A, N*sizeof(float), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy H2D failure");
//cuda processing sequence step 1 is complete
reduce<<<blocks, BLOCK_SIZE>>>(d_A, d_sums, N); // reduce stage 1
cudaCheckErrors("reduction kernel launch failure");
reduce<<<1, BLOCK_SIZE>>>(d_sums, d_A, blocks); // reduce stage 2
cudaCheckErrors("reduction kernel launch failure");
//cuda processing sequence step 2 is complete
// copy vector sums from device to host:
cudaMemcpy(h_sum, d_A, sizeof(float), cudaMemcpyDeviceToHost);
//cuda processing sequence step 3 is complete
cudaCheckErrors("reduction w/atomic kernel execution failure or cudaMemcpy D2H failure");
printf("reduction output: %f, expected sum reduction output: %f, expected max reduction output: %f\n", *h_sum, (float)((N-1)+max_val), max_val);
return 0;
}
|
4d487252e66d859e4a940ddc24defbd15bd5b8d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*------------------------------------------------------------------------------
Copyright © 2016 by Nicola Bombieri
Mangrove is provided under the terms of The MIT License (MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------*/
/**
* @author Federico Busato
* Univerity of Verona, Dept. of Computer Science
* [email protected]
*/
#include "Inference/Generator.hpp"
#include "Printer/InvariantPrinter.hpp"
#include "Inference/ResultCollector.hpp"
#include "Inference/CheckBinaryMonotony.hpp"
#include "Inference/CheckTernaryMonotony.hpp"
#include "Mining/Mining.hpp"
#include "Mining/Device/impl/GPUHelper.cuh"
#include "Mining/Device/GPUMiningFixed.cuh"
#include "Mining/Device/Kernels/NumericRange.cuh"
#include "Mining/Host/HostMiningFixed.hpp"
#include "XLib.hpp"
#include "config.cuh"
using namespace xlib;
using namespace timer;
namespace mangrove {
AutoTuning(GPUNumericRange)
template<>
void GPUMiningFixed<numeric_t>(const GPUParamStr& GPUParam,
const TracePropSTR<numeric_t>& TraceProp) {
result_t* devResultPTR;
hipGetSymbolAddress((void**) &devResultPTR, devResult);
using PROP_BASE = PROPERTY<128>;
using PROP_NB = PROPERTY<128, 1, NB>;
using PROP_NT = PROPERTY<256, 1, NT>;
Timer_cuda TM(1, 30, Color::FG_L_RED);
//--------------------------------------------------------------------------
ResultCollector<numeric_t> results_numeric(TraceProp.vars);
Generator<numeric_t> generator(TraceProp.vars);
// Unary Range
//--------------------------------------------------------------------------
devTraceSTR<numeric_t> devProp(TraceProp);
/*
#if defined(AUTO_TUNING)
AutoTuningClass::Init(TraceProp.vars);
AutoTuningGPUNumericRange<PROP_BASE>::Apply(devProp);
#endif*/
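    // Unary pass: GPUNumericRange computes the [min, max] range of every
    // variable over the whole trace; results are stored in devResult as one
    // float2 per variable (x = min, y = max).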
unsigned gridDim = gridConfig(GPUNumericRange<PROP_BASE>,
PROP_BASE::BlockSize, 0, TraceProp.vars);
TM.start();
hipLaunchKernelGGL(( GPUNumericRange<PROP_BASE>), dim3(gridDim), dim3(PROP_BASE::BlockSize), 0, 0, devProp);
TM.getTimeA("Unary Range");
__CUDA_ERROR("GPURangeChecking");
support::numericRangeCheck(devResultPTR, TraceProp, GPUParam.check_result);
hipMemcpyFromSymbol(results_numeric.getVectorResult<1>(), devResult,
static_cast<size_t>(TraceProp.vars) * sizeof(float2));
// Monotony checking
//==========================================================================
#if defined(NUMERIC_INFERENCE)
CheckBinaryMonotony<NBtemplate::size> (results_numeric, TraceProp.vars);
CheckTernaryMonotony<NTtemplate::size> (results_numeric, TraceProp.vars);
#endif
// Binary Mining
// =========================================================================
// Generate FORWARD binary dictionary
generator.setForwardDirection(true);
results_numeric.setForwardDirection(true);
// -------------------------------------------------------------------------
int forward_dictionary_size = generator.generator<2>(results_numeric);
entry_t* dictionary2F = generator.dictionary;
std::cout << "(Numeric)" << std::setw(30)
<< "Forward Dictionary size : "
<< forward_dictionary_size << std::endl << std::endl;
// launch FORWARD binary mining
result_t* binaryResult = static_cast<result_t*>(
results_numeric.getVectorResult<2>());
GPUHelper<PROP_NB>(binaryResult, dictionary2F, forward_dictionary_size,
TraceProp);
// Generate BACKWARD binary dictionary
generator.setForwardDirection(false);
results_numeric.setForwardDirection(true);
// -------------------------------------------------------------------------
int backward_dictionary_size = generator.generator<2>(results_numeric);
entry_t* dictionary2B = generator.dictionary + halfNumericDictionary;
std::cout << "(Numeric)" << std::setw(30)
<< "Backward Dictionary size : "
<< backward_dictionary_size << std::endl << std::endl;
// launch BACKWARD binary mining
results_numeric.setForwardDirection(false);
binaryResult = static_cast<result_t*>(results_numeric.getVectorResult<2>());
GPUHelper<PROP_NB>(binaryResult, dictionary2B, backward_dictionary_size,
TraceProp);
generator.setForwardDirection(true);
results_numeric.setForwardDirection(true);
#if defined(NUMERIC_INFERENCE)
support::MiningEquivalenceSets(results_numeric,
dictionary_ptr_t<2>(dictionary2F),
forward_dictionary_size);
#endif
// Ternary Mining
// -------------------------------------------------------------------------
int dictionary_size = generator.generator<3>(results_numeric);
entry_t *dictionaryT = generator.dictionary;
std::cout << "(Numeric)" << std::setw(30)
<< "Ternary Dictionary size : " << dictionary_size
<< std::endl << std::endl;
result_t *ternaryResult = static_cast<result_t*>(
results_numeric.getVectorResult<3>());
GPUHelper<PROP_NT>(ternaryResult, dictionaryT, dictionary_size, TraceProp);
// Print Invariants
//--------------------------------------------------------------------------
std::ofstream stream;
if (GPUParam.output_file)
stream.open(GPUParam.output_file);
InvariantPrinter<numeric_t> printer_numeric(GPUParam, TraceProp.vars);
std::cout << " Unary Result: "
<< printer_numeric.unaryInvariants(results_numeric, stream)
<< std::endl;
std::cout << " Binary Result: "
<< printer_numeric.binaryInvariants(results_numeric, stream)
<< std::endl;
std::cout << "Ternary Result: "
<< printer_numeric.ternaryInvariants(results_numeric, stream)
<< std::endl;
}
namespace support {
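// Optional host-side verification: recomputes each variable's min/max with
// std::minmax_element over the host trace and compares it to the GPU result
// copied back from devResult.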
void numericRangeCheck(result_t* devResult,
const TracePropSTR<numeric_t>& TraceProp,
bool check_result) {
if (check_result) {
float2* NumericRange = new float2[TraceProp.vars];
hipMemcpy(NumericRange, devResult,
static_cast<size_t>(TraceProp.vars) * sizeof(float2),
hipMemcpyDeviceToHost);
numeric_t* host_trace_var = TraceProp.host_trace;
for (int V = 0; V < TraceProp.vars; V++) {
auto minmax = std::minmax_element(host_trace_var,
host_trace_var + TraceProp.trace_length);
if (NumericRange[V].x != *minmax.first) {
__ERROR("Var: " << V << " -> NumericRange Min: "
<< std::setprecision(10) << std::fixed
<< NumericRange[V].x << " " << *minmax.first);
}
if (NumericRange[V].y != *minmax.second) {
__ERROR("Var: " << V << " -> NumericRange Max: "
<< std::setprecision(10) << std::fixed
<< NumericRange[V].y << " " << *minmax.second);
}
host_trace_var += TraceProp.trace_length;
}
std::cout << __func__ << ": OK" << std::endl << std::endl;
delete[] NumericRange;
}
}
} //@support
} //@mangrove
| 4d487252e66d859e4a940ddc24defbd15bd5b8d4.cu | /*------------------------------------------------------------------------------
Copyright © 2016 by Nicola Bombieri
Mangrove is provided under the terms of The MIT License (MIT):
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------*/
/**
* @author Federico Busato
* Univerity of Verona, Dept. of Computer Science
* [email protected]
*/
#include "Inference/Generator.hpp"
#include "Printer/InvariantPrinter.hpp"
#include "Inference/ResultCollector.hpp"
#include "Inference/CheckBinaryMonotony.hpp"
#include "Inference/CheckTernaryMonotony.hpp"
#include "Mining/Mining.hpp"
#include "Mining/Device/impl/GPUHelper.cuh"
#include "Mining/Device/GPUMiningFixed.cuh"
#include "Mining/Device/Kernels/NumericRange.cuh"
#include "Mining/Host/HostMiningFixed.hpp"
#include "XLib.hpp"
#include "config.cuh"
using namespace xlib;
using namespace timer;
namespace mangrove {
AutoTuning(GPUNumericRange)
template<>
void GPUMiningFixed<numeric_t>(const GPUParamStr& GPUParam,
const TracePropSTR<numeric_t>& TraceProp) {
result_t* devResultPTR;
cudaGetSymbolAddress((void**) &devResultPTR, devResult);
using PROP_BASE = PROPERTY<128>;
using PROP_NB = PROPERTY<128, 1, NB>;
using PROP_NT = PROPERTY<256, 1, NT>;
Timer_cuda TM(1, 30, Color::FG_L_RED);
//--------------------------------------------------------------------------
ResultCollector<numeric_t> results_numeric(TraceProp.vars);
Generator<numeric_t> generator(TraceProp.vars);
// Unary Range
//--------------------------------------------------------------------------
devTraceSTR<numeric_t> devProp(TraceProp);
/*
#if defined(AUTO_TUNING)
AutoTuningClass::Init(TraceProp.vars);
AutoTuningGPUNumericRange<PROP_BASE>::Apply(devProp);
#endif*/
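    // Unary pass: GPUNumericRange computes the [min, max] range of every
    // variable over the whole trace; results are stored in devResult as one
    // float2 per variable (x = min, y = max).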
unsigned gridDim = gridConfig(GPUNumericRange<PROP_BASE>,
PROP_BASE::BlockSize, 0, TraceProp.vars);
TM.start();
GPUNumericRange<PROP_BASE><<<gridDim, PROP_BASE::BlockSize>>>(devProp);
TM.getTimeA("Unary Range");
__CUDA_ERROR("GPURangeChecking");
support::numericRangeCheck(devResultPTR, TraceProp, GPUParam.check_result);
cudaMemcpyFromSymbol(results_numeric.getVectorResult<1>(), devResult,
static_cast<size_t>(TraceProp.vars) * sizeof(float2));
// Monotony checking
//==========================================================================
#if defined(NUMERIC_INFERENCE)
CheckBinaryMonotony<NBtemplate::size> (results_numeric, TraceProp.vars);
CheckTernaryMonotony<NTtemplate::size> (results_numeric, TraceProp.vars);
#endif
// Binary Mining
// =========================================================================
// Generate FORWARD binary dictionary
generator.setForwardDirection(true);
results_numeric.setForwardDirection(true);
// -------------------------------------------------------------------------
int forward_dictionary_size = generator.generator<2>(results_numeric);
entry_t* dictionary2F = generator.dictionary;
std::cout << "(Numeric)" << std::setw(30)
<< "Forward Dictionary size : "
<< forward_dictionary_size << std::endl << std::endl;
// launch FORWARD binary mining
result_t* binaryResult = static_cast<result_t*>(
results_numeric.getVectorResult<2>());
GPUHelper<PROP_NB>(binaryResult, dictionary2F, forward_dictionary_size,
TraceProp);
// Generate BACKWARD binary dictionary
generator.setForwardDirection(false);
results_numeric.setForwardDirection(true);
// -------------------------------------------------------------------------
int backward_dictionary_size = generator.generator<2>(results_numeric);
entry_t* dictionary2B = generator.dictionary + halfNumericDictionary;
std::cout << "(Numeric)" << std::setw(30)
<< "Backward Dictionary size : "
<< backward_dictionary_size << std::endl << std::endl;
// launch BACKWARD binary mining
results_numeric.setForwardDirection(false);
binaryResult = static_cast<result_t*>(results_numeric.getVectorResult<2>());
GPUHelper<PROP_NB>(binaryResult, dictionary2B, backward_dictionary_size,
TraceProp);
generator.setForwardDirection(true);
results_numeric.setForwardDirection(true);
#if defined(NUMERIC_INFERENCE)
support::MiningEquivalenceSets(results_numeric,
dictionary_ptr_t<2>(dictionary2F),
forward_dictionary_size);
#endif
// Ternary Mining
// -------------------------------------------------------------------------
int dictionary_size = generator.generator<3>(results_numeric);
entry_t *dictionaryT = generator.dictionary;
std::cout << "(Numeric)" << std::setw(30)
<< "Ternary Dictionary size : " << dictionary_size
<< std::endl << std::endl;
result_t *ternaryResult = static_cast<result_t*>(
results_numeric.getVectorResult<3>());
GPUHelper<PROP_NT>(ternaryResult, dictionaryT, dictionary_size, TraceProp);
// Print Invariants
//--------------------------------------------------------------------------
std::ofstream stream;
if (GPUParam.output_file)
stream.open(GPUParam.output_file);
InvariantPrinter<numeric_t> printer_numeric(GPUParam, TraceProp.vars);
std::cout << " Unary Result: "
<< printer_numeric.unaryInvariants(results_numeric, stream)
<< std::endl;
std::cout << " Binary Result: "
<< printer_numeric.binaryInvariants(results_numeric, stream)
<< std::endl;
std::cout << "Ternary Result: "
<< printer_numeric.ternaryInvariants(results_numeric, stream)
<< std::endl;
}
namespace support {
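// Optional host-side verification: recomputes each variable's min/max with
// std::minmax_element over the host trace and compares it to the GPU result
// copied back from devResult.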
void numericRangeCheck(result_t* devResult,
const TracePropSTR<numeric_t>& TraceProp,
bool check_result) {
if (check_result) {
float2* NumericRange = new float2[TraceProp.vars];
cudaMemcpy(NumericRange, devResult,
static_cast<size_t>(TraceProp.vars) * sizeof(float2),
cudaMemcpyDeviceToHost);
numeric_t* host_trace_var = TraceProp.host_trace;
for (int V = 0; V < TraceProp.vars; V++) {
auto minmax = std::minmax_element(host_trace_var,
host_trace_var + TraceProp.trace_length);
if (NumericRange[V].x != *minmax.first) {
__ERROR("Var: " << V << " -> NumericRange Min: "
<< std::setprecision(10) << std::fixed
<< NumericRange[V].x << " " << *minmax.first);
}
if (NumericRange[V].y != *minmax.second) {
__ERROR("Var: " << V << " -> NumericRange Max: "
<< std::setprecision(10) << std::fixed
<< NumericRange[V].y << " " << *minmax.second);
}
host_trace_var += TraceProp.trace_length;
}
std::cout << __func__ << ": OK" << std::endl << std::endl;
delete[] NumericRange;
}
}
} //@support
} //@mangrove
|
69439bbbffd4c530c60aef0bff5bffe3986ba304.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define GLFW_INCLUDE_VULKAN
#ifdef _WIN64
#include <aclapi.h>
#include <dxgi1_2.h>
#include <windows.h>
#include <VersionHelpers.h>
#define _USE_MATH_DEFINES
#endif
#include <GLFW/glfw3.h>
#include <vulkan/vulkan.h>
#ifdef _WIN64
#include <vulkan/vulkan_win32.h>
#endif
#include <algorithm>
#include <array>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <set>
#include <stdexcept>
#include <thread>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include <helper_math.h>
#include "linmath.h"
#define WIDTH 800
#define HEIGHT 600
const int MAX_FRAMES = 4;
const std::vector<const char*> validationLayers = {
"VK_LAYER_KHRONOS_validation"};
#ifdef NDEBUG
const bool enableValidationLayers = false;
#else
const bool enableValidationLayers = false;
#endif
std::string execution_path;
VkResult CreateDebugUtilsMessengerEXT(
VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDebugUtilsMessengerEXT* pDebugMessenger) {
auto func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
instance, "vkCreateDebugUtilsMessengerEXT");
if (func != nullptr) {
return func(instance, pCreateInfo, pAllocator, pDebugMessenger);
} else {
return VK_ERROR_EXTENSION_NOT_PRESENT;
}
};
const std::vector<const char*> deviceExtensions = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
#ifdef _WIN64
VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME,
#else
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
#endif
};
#ifdef _WIN64
class WindowsSecurityAttributes {
protected:
SECURITY_ATTRIBUTES m_winSecurityAttributes;
PSECURITY_DESCRIPTOR m_winPSecurityDescriptor;
public:
WindowsSecurityAttributes();
SECURITY_ATTRIBUTES* operator&();
~WindowsSecurityAttributes();
};
WindowsSecurityAttributes::WindowsSecurityAttributes() {
m_winPSecurityDescriptor = (PSECURITY_DESCRIPTOR)calloc(
1, SECURITY_DESCRIPTOR_MIN_LENGTH + 2 * sizeof(void**));
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
InitializeSecurityDescriptor(m_winPSecurityDescriptor,
SECURITY_DESCRIPTOR_REVISION);
SID_IDENTIFIER_AUTHORITY sidIdentifierAuthority =
SECURITY_WORLD_SID_AUTHORITY;
AllocateAndInitializeSid(&sidIdentifierAuthority, 1, SECURITY_WORLD_RID, 0, 0,
0, 0, 0, 0, 0, ppSID);
EXPLICIT_ACCESS explicitAccess;
ZeroMemory(&explicitAccess, sizeof(EXPLICIT_ACCESS));
explicitAccess.grfAccessPermissions =
STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL;
explicitAccess.grfAccessMode = SET_ACCESS;
explicitAccess.grfInheritance = INHERIT_ONLY;
explicitAccess.Trustee.TrusteeForm = TRUSTEE_IS_SID;
explicitAccess.Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP;
explicitAccess.Trustee.ptstrName = (LPTSTR)*ppSID;
SetEntriesInAcl(1, &explicitAccess, NULL, ppACL);
SetSecurityDescriptorDacl(m_winPSecurityDescriptor, TRUE, *ppACL, FALSE);
m_winSecurityAttributes.nLength = sizeof(m_winSecurityAttributes);
m_winSecurityAttributes.lpSecurityDescriptor = m_winPSecurityDescriptor;
m_winSecurityAttributes.bInheritHandle = TRUE;
}
SECURITY_ATTRIBUTES* WindowsSecurityAttributes::operator&() {
return &m_winSecurityAttributes;
}
WindowsSecurityAttributes::~WindowsSecurityAttributes() {
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
if (*ppSID) {
FreeSid(*ppSID);
}
if (*ppACL) {
LocalFree(*ppACL);
}
free(m_winPSecurityDescriptor);
}
#endif
void DestroyDebugUtilsMessengerEXT(VkInstance instance,
VkDebugUtilsMessengerEXT debugMessenger,
const VkAllocationCallbacks* pAllocator) {
auto func = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
instance, "vkDestroyDebugUtilsMessengerEXT");
if (func != nullptr) {
func(instance, debugMessenger, pAllocator);
}
}
struct QueueFamilyIndices {
int graphicsFamily = -1;
int presentFamily = -1;
bool isComplete() { return graphicsFamily >= 0 && presentFamily >= 0; }
};
struct SwapChainSupportDetails {
VkSurfaceCapabilitiesKHR capabilities;
std::vector<VkSurfaceFormatKHR> formats;
std::vector<VkPresentModeKHR> presentModes;
};
typedef float vec2[2];
struct Vertex {
vec4 pos;
vec3 color;
vec2 texCoord;
static VkVertexInputBindingDescription getBindingDescription() {
VkVertexInputBindingDescription bindingDescription = {};
bindingDescription.binding = 0;
bindingDescription.stride = sizeof(Vertex);
bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
return bindingDescription;
}
static std::array<VkVertexInputAttributeDescription, 3>
getAttributeDescriptions() {
std::array<VkVertexInputAttributeDescription, 3> attributeDescriptions = {};
attributeDescriptions[0].binding = 0;
attributeDescriptions[0].location = 0;
attributeDescriptions[0].format = VK_FORMAT_R32G32B32A32_SFLOAT;
attributeDescriptions[0].offset = offsetof(Vertex, pos);
attributeDescriptions[1].binding = 0;
attributeDescriptions[1].location = 1;
attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT;
attributeDescriptions[1].offset = offsetof(Vertex, color);
attributeDescriptions[2].binding = 0;
attributeDescriptions[2].location = 2;
attributeDescriptions[2].format = VK_FORMAT_R32G32_SFLOAT;
attributeDescriptions[2].offset = offsetof(Vertex, texCoord);
return attributeDescriptions;
}
};
struct UniformBufferObject {
alignas(16) mat4x4 model;
alignas(16) mat4x4 view;
alignas(16) mat4x4 proj;
};
const std::vector<Vertex> vertices = {
{{-1.0f, -1.0f, 0.0f, 1.0f}, {1.0f, 0.0f, 0.0f}, {0.0f, 0.0f}},
{{1.0f, -1.0f, 0.0f, 1.0f}, {0.0f, 1.0f, 0.0f}, {1.0f, 0.0f}},
{{1.0f, 1.0f, 0.0f, 1.0f}, {0.0f, 0.0f, 1.0f}, {1.0f, 1.0f}},
{{-1.0f, 1.0f, 0.0f, 1.0f}, {1.0f, 1.0f, 1.0f}, {0.0f, 1.0f}}};
const std::vector<uint16_t> indices = {0, 1, 2, 2, 3, 0};
// convert floating point rgba color to 32-bit integer
__device__ unsigned int rgbaFloatToInt(float4 rgba) {
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return ((unsigned int)(rgba.w * 255.0f) << 24) |
((unsigned int)(rgba.z * 255.0f) << 16) |
((unsigned int)(rgba.y * 255.0f) << 8) |
((unsigned int)(rgba.x * 255.0f));
}
__device__ float4 rgbaIntToFloat(unsigned int c) {
float4 rgba;
rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f;
rgba.y = ((c >> 8) & 0xff) * 0.003921568627f; // /255.0f;
rgba.z = ((c >> 16) & 0xff) * 0.003921568627f; // /255.0f;
rgba.w = ((c >> 24) & 0xff) * 0.003921568627f; // /255.0f;
return rgba;
}
int filter_radius = 14;
int g_nFilterSign = 1;
// This varies the filter radius, so we can see automatic animation
void varySigma() {
filter_radius += g_nFilterSign;
if (filter_radius > 64) {
filter_radius = 64; // clamp to 64 and then negate sign
g_nFilterSign = -1;
} else if (filter_radius < 0) {
filter_radius = 0;
g_nFilterSign = 1;
}
}
// row pass using texture lookups
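// Each thread owns one image row and processes every mip level: it seeds a
// running sum over (2*filter_radius+1) texels with normalized texture lookups,
// then slides the window across the row, adding the entering texel and
// subtracting the leaving one before writing the averaged pixel to the surface.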
__global__ void d_boxfilter_rgba_x(hipSurfaceObject_t* dstSurfMipMapArray,
hipTextureObject_t textureMipMapInput,
size_t baseWidth, size_t baseHeight,
size_t mipLevels, int filter_radius) {
float scale = 1.0f / (float)((filter_radius << 1) + 1);
unsigned int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y < baseHeight) {
for (uint32_t mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
uint32_t width =
(baseWidth >> mipLevelIdx) ? (baseWidth >> mipLevelIdx) : 1;
uint32_t height =
(baseHeight >> mipLevelIdx) ? (baseHeight >> mipLevelIdx) : 1;
if (y < height && filter_radius < width) {
float px = 1.0 / width;
float py = 1.0 / height;
float4 t = make_float4(0.0f);
for (int x = -filter_radius; x <= filter_radius; x++) {
t += tex2DLod<float4>(textureMipMapInput, x * px, y * py,
(float)mipLevelIdx);
}
unsigned int dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], 0, y);
for (int x = 1; x < width; x++) {
t += tex2DLod<float4>(textureMipMapInput, (x + filter_radius) * px,
y * py, (float)mipLevelIdx);
t -=
tex2DLod<float4>(textureMipMapInput, (x - filter_radius - 1) * px,
y * py, (float)mipLevelIdx);
unsigned int dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx],
x * sizeof(uchar4), y);
}
}
}
}
}
// column pass using coalesced global memory reads
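// Each thread owns one image column and reads/writes the surfaces directly;
// the top and bottom edges are handled by clamping to the first/last row
// before and after the steady-state sliding-window loop.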
__global__ void d_boxfilter_rgba_y(hipSurfaceObject_t* dstSurfMipMapArray,
hipSurfaceObject_t* srcSurfMipMapArray,
size_t baseWidth, size_t baseHeight,
size_t mipLevels, int filter_radius) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float scale = 1.0f / (float)((filter_radius << 1) + 1);
for (uint32_t mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
uint32_t width =
(baseWidth >> mipLevelIdx) ? (baseWidth >> mipLevelIdx) : 1;
uint32_t height =
(baseHeight >> mipLevelIdx) ? (baseHeight >> mipLevelIdx) : 1;
if (x < width && height > filter_radius) {
float4 t;
// do left edge
int colInBytes = x * sizeof(uchar4);
unsigned int pixFirst = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, 0);
t = rgbaIntToFloat(pixFirst) * filter_radius;
for (int y = 0; (y < (filter_radius + 1)) && (y < height); y++) {
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y);
t += rgbaIntToFloat(pix);
}
unsigned int dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, 0);
for (int y = 1; (y < filter_radius + 1) && ((y + filter_radius) < height);
y++) {
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y + filter_radius);
t += rgbaIntToFloat(pix);
t -= rgbaIntToFloat(pixFirst);
dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
}
// main loop
for (int y = (filter_radius + 1); y < (height - filter_radius); y++) {
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y + filter_radius);
t += rgbaIntToFloat(pix);
pix = surf2Dread<unsigned int>(srcSurfMipMapArray[mipLevelIdx],
colInBytes, y - filter_radius - 1);
t -= rgbaIntToFloat(pix);
dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
}
// do right edge
unsigned int pixLast = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, height - 1);
for (int y = height - filter_radius;
(y < height) && ((y - filter_radius - 1) > 1); y++) {
t += rgbaIntToFloat(pixLast);
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y - filter_radius - 1);
t -= rgbaIntToFloat(pix);
dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
}
}
}
}
class vulkanImageCUDA {
public:
void loadImageData(const std::string& filename) {
// load image (needed so we can get the width and height before we create
// the window
char* image_path =
sdkFindFilePath(filename.c_str(), execution_path.c_str());
if (image_path == 0) {
printf("Error finding image file '%s'\n", filename.c_str());
exit(EXIT_FAILURE);
}
sdkLoadPPM4(image_path, (unsigned char**)&image_data, &imageWidth,
&imageHeight);
if (!image_data) {
printf("Error opening file '%s'\n", image_path);
exit(EXIT_FAILURE);
}
printf("Loaded '%s', %d x %d pixels\n", image_path, imageWidth,
imageHeight);
}
void run() {
initWindow();
initVulkan();
initCuda();
mainLoop();
cleanup();
}
private:
GLFWwindow* window;
VkInstance instance;
VkDebugUtilsMessengerEXT debugMessenger;
VkSurfaceKHR surface;
VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
VkDevice device;
uint8_t vkDeviceUUID[VK_UUID_SIZE];
VkQueue graphicsQueue;
VkQueue presentQueue;
VkSwapchainKHR swapChain;
std::vector<VkImage> swapChainImages;
VkFormat swapChainImageFormat;
VkExtent2D swapChainExtent;
std::vector<VkImageView> swapChainImageViews;
std::vector<VkFramebuffer> swapChainFramebuffers;
VkRenderPass renderPass;
VkDescriptorSetLayout descriptorSetLayout;
VkPipelineLayout pipelineLayout;
VkPipeline graphicsPipeline;
VkCommandPool commandPool;
VkImage textureImage;
VkDeviceMemory textureImageMemory;
VkImageView textureImageView;
VkSampler textureSampler;
VkBuffer vertexBuffer;
VkDeviceMemory vertexBufferMemory;
VkBuffer indexBuffer;
VkDeviceMemory indexBufferMemory;
std::vector<VkBuffer> uniformBuffers;
std::vector<VkDeviceMemory> uniformBuffersMemory;
VkDescriptorPool descriptorPool;
std::vector<VkDescriptorSet> descriptorSets;
std::vector<VkCommandBuffer> commandBuffers;
std::vector<VkSemaphore> imageAvailableSemaphores;
std::vector<VkSemaphore> renderFinishedSemaphores;
VkSemaphore cudaUpdateVkSemaphore, vkUpdateCudaSemaphore;
std::vector<VkFence> inFlightFences;
size_t currentFrame = 0;
bool framebufferResized = false;
#ifdef _WIN64
PFN_vkGetMemoryWin32HandleKHR fpGetMemoryWin32HandleKHR;
PFN_vkGetSemaphoreWin32HandleKHR fpGetSemaphoreWin32HandleKHR;
#else
PFN_vkGetMemoryFdKHR fpGetMemoryFdKHR = NULL;
PFN_vkGetSemaphoreFdKHR fpGetSemaphoreFdKHR = NULL;
#endif
PFN_vkGetPhysicalDeviceProperties2 fpGetPhysicalDeviceProperties2;
unsigned int* image_data = NULL;
unsigned int imageWidth, imageHeight;
unsigned int mipLevels;
size_t totalImageMemSize;
// CUDA objects
cudaExternalMemory_t cudaExtMemImageBuffer;
hipMipmappedArray_t cudaMipmappedImageArray, cudaMipmappedImageArrayTemp,
cudaMipmappedImageArrayOrig;
std::vector<hipSurfaceObject_t> surfaceObjectList, surfaceObjectListTemp;
hipSurfaceObject_t *d_surfaceObjectList, *d_surfaceObjectListTemp;
hipTextureObject_t textureObjMipMapInput;
cudaExternalSemaphore_t cudaExtCudaUpdateVkSemaphore;
cudaExternalSemaphore_t cudaExtVkUpdateCudaSemaphore;
hipStream_t streamToRun;
void initWindow() {
glfwInit();
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan Image CUDA Box Filter",
nullptr, nullptr);
glfwSetWindowUserPointer(window, this);
glfwSetFramebufferSizeCallback(window, framebufferResizeCallback);
}
static void framebufferResizeCallback(GLFWwindow* window, int width,
int height) {
auto app =
reinterpret_cast<vulkanImageCUDA*>(glfwGetWindowUserPointer(window));
app->framebufferResized = true;
}
void initVulkan() {
createInstance();
setupDebugMessenger();
createSurface();
pickPhysicalDevice();
createLogicalDevice();
getKhrExtensionsFn();
createSwapChain();
createImageViews();
createRenderPass();
createDescriptorSetLayout();
createGraphicsPipeline();
createFramebuffers();
createCommandPool();
createTextureImage();
createTextureImageView();
createTextureSampler();
createVertexBuffer();
createIndexBuffer();
createUniformBuffers();
createDescriptorPool();
createDescriptorSets();
createCommandBuffers();
createSyncObjects();
createSyncObjectsExt();
}
void initCuda() {
setCudaVkDevice();
checkCudaErrors(hipStreamCreate(&streamToRun));
cudaVkImportImageMem();
cudaVkImportSemaphore();
}
void mainLoop() {
updateUniformBuffer();
while (!glfwWindowShouldClose(window)) {
glfwPollEvents();
drawFrame();
}
vkDeviceWaitIdle(device);
}
void cleanupSwapChain() {
for (auto framebuffer : swapChainFramebuffers) {
vkDestroyFramebuffer(device, framebuffer, nullptr);
}
vkFreeCommandBuffers(device, commandPool,
static_cast<uint32_t>(commandBuffers.size()),
commandBuffers.data());
vkDestroyPipeline(device, graphicsPipeline, nullptr);
vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
vkDestroyRenderPass(device, renderPass, nullptr);
for (auto imageView : swapChainImageViews) {
vkDestroyImageView(device, imageView, nullptr);
}
vkDestroySwapchainKHR(device, swapChain, nullptr);
for (size_t i = 0; i < swapChainImages.size(); i++) {
vkDestroyBuffer(device, uniformBuffers[i], nullptr);
vkFreeMemory(device, uniformBuffersMemory[i], nullptr);
}
vkDestroyDescriptorPool(device, descriptorPool, nullptr);
}
void cleanup() {
cleanupSwapChain();
vkDestroySampler(device, textureSampler, nullptr);
vkDestroyImageView(device, textureImageView, nullptr);
for (int i = 0; i < mipLevels; i++) {
checkCudaErrors(hipDestroySurfaceObject(surfaceObjectList[i]));
checkCudaErrors(hipDestroySurfaceObject(surfaceObjectListTemp[i]));
}
checkCudaErrors(hipFree(d_surfaceObjectList));
checkCudaErrors(hipFree(d_surfaceObjectListTemp));
checkCudaErrors(hipFreeMipmappedArray(cudaMipmappedImageArrayTemp));
checkCudaErrors(hipFreeMipmappedArray(cudaMipmappedImageArrayOrig));
checkCudaErrors(hipFreeMipmappedArray(cudaMipmappedImageArray));
checkCudaErrors(hipDestroyTextureObject(textureObjMipMapInput));
checkCudaErrors(cudaDestroyExternalMemory(cudaExtMemImageBuffer));
checkCudaErrors(cudaDestroyExternalSemaphore(cudaExtCudaUpdateVkSemaphore));
checkCudaErrors(cudaDestroyExternalSemaphore(cudaExtVkUpdateCudaSemaphore));
vkDestroyImage(device, textureImage, nullptr);
vkFreeMemory(device, textureImageMemory, nullptr);
vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);
vkDestroyBuffer(device, indexBuffer, nullptr);
vkFreeMemory(device, indexBufferMemory, nullptr);
vkDestroyBuffer(device, vertexBuffer, nullptr);
vkFreeMemory(device, vertexBufferMemory, nullptr);
for (size_t i = 0; i < MAX_FRAMES; i++) {
vkDestroySemaphore(device, renderFinishedSemaphores[i], nullptr);
vkDestroySemaphore(device, imageAvailableSemaphores[i], nullptr);
vkDestroyFence(device, inFlightFences[i], nullptr);
}
vkDestroyCommandPool(device, commandPool, nullptr);
vkDestroyDevice(device, nullptr);
if (enableValidationLayers) {
DestroyDebugUtilsMessengerEXT(instance, debugMessenger, nullptr);
}
vkDestroySurfaceKHR(instance, surface, nullptr);
vkDestroyInstance(instance, nullptr);
glfwDestroyWindow(window);
glfwTerminate();
}
void recreateSwapChain() {
int width = 0, height = 0;
while (width == 0 || height == 0) {
glfwGetFramebufferSize(window, &width, &height);
glfwWaitEvents();
}
vkDeviceWaitIdle(device);
cleanupSwapChain();
createSwapChain();
createImageViews();
createRenderPass();
createGraphicsPipeline();
createFramebuffers();
createUniformBuffers();
createDescriptorPool();
createDescriptorSets();
createCommandBuffers();
}
void createInstance() {
if (enableValidationLayers && !checkValidationLayerSupport()) {
throw std::runtime_error(
"validation layers requested, but not available!");
}
VkApplicationInfo appInfo = {};
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
appInfo.pApplicationName = "Vulkan Image CUDA Interop";
appInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
appInfo.pEngineName = "No Engine";
appInfo.engineVersion = VK_MAKE_VERSION(1, 0, 0);
appInfo.apiVersion = VK_API_VERSION_1_0;
VkInstanceCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
createInfo.pApplicationInfo = &appInfo;
auto extensions = getRequiredExtensions();
createInfo.enabledExtensionCount = static_cast<uint32_t>(extensions.size());
createInfo.ppEnabledExtensionNames = extensions.data();
VkDebugUtilsMessengerCreateInfoEXT debugCreateInfo;
if (enableValidationLayers) {
createInfo.enabledLayerCount =
static_cast<uint32_t>(validationLayers.size());
createInfo.ppEnabledLayerNames = validationLayers.data();
populateDebugMessengerCreateInfo(debugCreateInfo);
createInfo.pNext = (VkDebugUtilsMessengerCreateInfoEXT*)&debugCreateInfo;
} else {
createInfo.enabledLayerCount = 0;
createInfo.pNext = nullptr;
}
if (vkCreateInstance(&createInfo, nullptr, &instance) != VK_SUCCESS) {
throw std::runtime_error("failed to create instance!");
}
fpGetPhysicalDeviceProperties2 =
(PFN_vkGetPhysicalDeviceProperties2)vkGetInstanceProcAddr(
instance, "vkGetPhysicalDeviceProperties2");
if (fpGetPhysicalDeviceProperties2 == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetPhysicalDeviceProperties2KHR\" not "
"found.\n");
}
#ifdef _WIN64
fpGetMemoryWin32HandleKHR =
(PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(
instance, "vkGetMemoryWin32HandleKHR");
if (fpGetMemoryWin32HandleKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetMemoryWin32HandleKHR\" not "
"found.\n");
}
#else
fpGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(
instance, "vkGetMemoryFdKHR");
if (fpGetMemoryFdKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetMemoryFdKHR\" not found.\n");
} else {
std::cout << "Vulkan proc address for vkGetMemoryFdKHR - "
<< fpGetMemoryFdKHR << std::endl;
}
#endif
}
void populateDebugMessengerCreateInfo(
VkDebugUtilsMessengerCreateInfoEXT& createInfo) {
createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
createInfo.messageSeverity =
VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
createInfo.pfnUserCallback = debugCallback;
}
void setupDebugMessenger() {
if (!enableValidationLayers) return;
VkDebugUtilsMessengerCreateInfoEXT createInfo;
populateDebugMessengerCreateInfo(createInfo);
if (CreateDebugUtilsMessengerEXT(instance, &createInfo, nullptr,
&debugMessenger) != VK_SUCCESS) {
throw std::runtime_error("failed to set up debug messenger!");
}
}
void createSurface() {
if (glfwCreateWindowSurface(instance, window, nullptr, &surface) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create window surface!");
}
}
void pickPhysicalDevice() {
uint32_t deviceCount = 0;
vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
if (deviceCount == 0) {
throw std::runtime_error("failed to find GPUs with Vulkan support!");
}
std::vector<VkPhysicalDevice> devices(deviceCount);
vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());
for (const auto& device : devices) {
if (isDeviceSuitable(device)) {
physicalDevice = device;
break;
}
}
if (physicalDevice == VK_NULL_HANDLE) {
throw std::runtime_error("failed to find a suitable GPU!");
}
std::cout << "Selected physical device = " << physicalDevice << std::endl;
VkPhysicalDeviceIDProperties vkPhysicalDeviceIDProperties = {};
vkPhysicalDeviceIDProperties.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES;
vkPhysicalDeviceIDProperties.pNext = NULL;
VkPhysicalDeviceProperties2 vkPhysicalDeviceProperties2 = {};
vkPhysicalDeviceProperties2.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
vkPhysicalDeviceProperties2.pNext = &vkPhysicalDeviceIDProperties;
fpGetPhysicalDeviceProperties2(physicalDevice,
&vkPhysicalDeviceProperties2);
memcpy(vkDeviceUUID, vkPhysicalDeviceIDProperties.deviceUUID,
sizeof(vkDeviceUUID));
}
void getKhrExtensionsFn() {
#ifdef _WIN64
fpGetSemaphoreWin32HandleKHR =
(PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(
device, "vkGetSemaphoreWin32HandleKHR");
if (fpGetSemaphoreWin32HandleKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetSemaphoreWin32HandleKHR\" not "
"found.\n");
}
#else
fpGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(
device, "vkGetSemaphoreFdKHR");
if (fpGetSemaphoreFdKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetSemaphoreFdKHR\" not found.\n");
}
#endif
}
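  // Select the CUDA device whose UUID matches the Vulkan physical device UUID,
  // so the CUDA kernels and the Vulkan renderer operate on the same GPU.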
int setCudaVkDevice() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceCount(&device_count));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the GPU which is selected by Vulkan
while (current_device < device_count) {
hipGetDeviceProperties(&deviceProp, current_device);
if ((deviceProp.computeMode != hipComputeModeProhibited)) {
// Compare the cuda device UUID with vulkan UUID
int ret = memcmp(&deviceProp.uuid, &vkDeviceUUID, VK_UUID_SIZE);
if (ret == 0) {
checkCudaErrors(hipSetDevice(current_device));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, current_device));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, deviceProp.name, deviceProp.major,
deviceProp.minor);
return current_device;
}
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No Vulkan-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
void createLogicalDevice() {
QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
std::set<int> uniqueQueueFamilies = {indices.graphicsFamily,
indices.presentFamily};
float queuePriority = 1.0f;
for (int queueFamily : uniqueQueueFamilies) {
VkDeviceQueueCreateInfo queueCreateInfo = {};
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.queueFamilyIndex = queueFamily;
queueCreateInfo.queueCount = 1;
queueCreateInfo.pQueuePriorities = &queuePriority;
queueCreateInfos.push_back(queueCreateInfo);
}
VkPhysicalDeviceFeatures deviceFeatures = {};
VkDeviceCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
createInfo.pQueueCreateInfos = queueCreateInfos.data();
createInfo.queueCreateInfoCount = queueCreateInfos.size();
createInfo.pEnabledFeatures = &deviceFeatures;
std::vector<const char*> enabledExtensionNameList;
for (int i = 0; i < deviceExtensions.size(); i++) {
enabledExtensionNameList.push_back(deviceExtensions[i]);
}
if (enableValidationLayers) {
createInfo.enabledLayerCount =
static_cast<uint32_t>(validationLayers.size());
createInfo.ppEnabledLayerNames = validationLayers.data();
} else {
createInfo.enabledLayerCount = 0;
}
createInfo.enabledExtensionCount =
static_cast<uint32_t>(enabledExtensionNameList.size());
createInfo.ppEnabledExtensionNames = enabledExtensionNameList.data();
if (vkCreateDevice(physicalDevice, &createInfo, nullptr, &device) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create logical device!");
}
vkGetDeviceQueue(device, indices.graphicsFamily, 0, &graphicsQueue);
vkGetDeviceQueue(device, indices.presentFamily, 0, &presentQueue);
}
void createSwapChain() {
SwapChainSupportDetails swapChainSupport =
querySwapChainSupport(physicalDevice);
VkSurfaceFormatKHR surfaceFormat =
chooseSwapSurfaceFormat(swapChainSupport.formats);
VkPresentModeKHR presentMode =
chooseSwapPresentMode(swapChainSupport.presentModes);
VkExtent2D extent = chooseSwapExtent(swapChainSupport.capabilities);
uint32_t imageCount = swapChainSupport.capabilities.minImageCount + 1;
if (swapChainSupport.capabilities.maxImageCount > 0 &&
imageCount > swapChainSupport.capabilities.maxImageCount) {
imageCount = swapChainSupport.capabilities.maxImageCount;
}
VkSwapchainCreateInfoKHR createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
createInfo.surface = surface;
createInfo.minImageCount = imageCount;
createInfo.imageFormat = surfaceFormat.format;
createInfo.imageColorSpace = surfaceFormat.colorSpace;
createInfo.imageExtent = extent;
createInfo.imageArrayLayers = 1;
createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
uint32_t queueFamilyIndices[] = {(uint32_t)indices.graphicsFamily,
(uint32_t)indices.presentFamily};
if (indices.graphicsFamily != indices.presentFamily) {
createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
createInfo.queueFamilyIndexCount = 2;
createInfo.pQueueFamilyIndices = queueFamilyIndices;
} else {
createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
}
createInfo.preTransform = swapChainSupport.capabilities.currentTransform;
createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
createInfo.presentMode = presentMode;
createInfo.clipped = VK_TRUE;
if (vkCreateSwapchainKHR(device, &createInfo, nullptr, &swapChain) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create swap chain!");
}
vkGetSwapchainImagesKHR(device, swapChain, &imageCount, nullptr);
swapChainImages.resize(imageCount);
vkGetSwapchainImagesKHR(device, swapChain, &imageCount,
swapChainImages.data());
swapChainImageFormat = surfaceFormat.format;
swapChainExtent = extent;
}
void createImageViews() {
swapChainImageViews.resize(swapChainImages.size());
for (size_t i = 0; i < swapChainImages.size(); i++) {
swapChainImageViews[i] =
createImageView(swapChainImages[i], swapChainImageFormat);
}
}
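// Single-subpass render pass with one color attachment that is cleared on
// load and transitioned to PRESENT_SRC_KHR for presentation.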
void createRenderPass() {
VkAttachmentDescription colorAttachment = {};
colorAttachment.format = swapChainImageFormat;
colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VkAttachmentReference colorAttachmentRef = {};
colorAttachmentRef.attachment = 0;
colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorAttachmentRef;
VkSubpassDependency dependency = {};
dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
dependency.dstSubpass = 0;
dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.srcAccessMask = 0;
dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
VkRenderPassCreateInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassInfo.attachmentCount = 1;
renderPassInfo.pAttachments = &colorAttachment;
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
renderPassInfo.dependencyCount = 1;
renderPassInfo.pDependencies = &dependency;
if (vkCreateRenderPass(device, &renderPassInfo, nullptr, &renderPass) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create render pass!");
}
}
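// Descriptor set layout: binding 0 is the uniform buffer (vertex stage),
// binding 1 is the combined image sampler (fragment stage) that samples the
// CUDA-filtered texture.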
void createDescriptorSetLayout() {
VkDescriptorSetLayoutBinding uboLayoutBinding = {};
uboLayoutBinding.binding = 0;
uboLayoutBinding.descriptorCount = 1;
uboLayoutBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
uboLayoutBinding.pImmutableSamplers = nullptr;
uboLayoutBinding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
VkDescriptorSetLayoutBinding samplerLayoutBinding = {};
samplerLayoutBinding.binding = 1;
samplerLayoutBinding.descriptorCount = 1;
samplerLayoutBinding.descriptorType =
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
samplerLayoutBinding.pImmutableSamplers = nullptr;
samplerLayoutBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
std::array<VkDescriptorSetLayoutBinding, 2> bindings = {
uboLayoutBinding, samplerLayoutBinding};
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layoutInfo.bindingCount = static_cast<uint32_t>(bindings.size());
layoutInfo.pBindings = bindings.data();
if (vkCreateDescriptorSetLayout(device, &layoutInfo, nullptr,
&descriptorSetLayout) != VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor set layout!");
}
}
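// Fixed-function graphics pipeline for the textured quad; the vertex and
// fragment shader modules are built from the files loaded by readFile() below.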
void createGraphicsPipeline() {
auto vertShaderCode = readFile("shader.vert");
auto fragShaderCode = readFile("shader.frag");
VkShaderModule vertShaderModule = createShaderModule(vertShaderCode);
VkShaderModule fragShaderModule = createShaderModule(fragShaderCode);
VkPipelineShaderStageCreateInfo vertShaderStageInfo = {};
vertShaderStageInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertShaderStageInfo.module = vertShaderModule;
vertShaderStageInfo.pName = "main";
VkPipelineShaderStageCreateInfo fragShaderStageInfo = {};
fragShaderStageInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragShaderStageInfo.module = fragShaderModule;
fragShaderStageInfo.pName = "main";
VkPipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo,
fragShaderStageInfo};
VkPipelineVertexInputStateCreateInfo vertexInputInfo = {};
vertexInputInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
auto bindingDescription = Vertex::getBindingDescription();
auto attributeDescriptions = Vertex::getAttributeDescriptions();
vertexInputInfo.vertexBindingDescriptionCount = 1;
vertexInputInfo.vertexAttributeDescriptionCount =
static_cast<uint32_t>(attributeDescriptions.size());
vertexInputInfo.pVertexBindingDescriptions = &bindingDescription;
vertexInputInfo.pVertexAttributeDescriptions = attributeDescriptions.data();
VkPipelineInputAssemblyStateCreateInfo inputAssembly = {};
inputAssembly.sType =
VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
inputAssembly.primitiveRestartEnable = VK_FALSE;
VkViewport viewport = {};
viewport.x = 0.0f;
viewport.y = 0.0f;
viewport.width = (float)swapChainExtent.width;
viewport.height = (float)swapChainExtent.height;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
VkRect2D scissor = {};
scissor.offset = {0, 0};
scissor.extent = swapChainExtent;
VkPipelineViewportStateCreateInfo viewportState = {};
viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewportState.viewportCount = 1;
viewportState.pViewports = &viewport;
viewportState.scissorCount = 1;
viewportState.pScissors = &scissor;
VkPipelineRasterizationStateCreateInfo rasterizer = {};
rasterizer.sType =
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterizer.depthClampEnable = VK_FALSE;
rasterizer.rasterizerDiscardEnable = VK_FALSE;
rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
rasterizer.lineWidth = 1.0f;
rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterizer.depthBiasEnable = VK_FALSE;
VkPipelineMultisampleStateCreateInfo multisampling = {};
multisampling.sType =
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisampling.sampleShadingEnable = VK_FALSE;
multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
VkPipelineColorBlendAttachmentState colorBlendAttachment = {};
colorBlendAttachment.colorWriteMask =
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
colorBlendAttachment.blendEnable = VK_FALSE;
VkPipelineColorBlendStateCreateInfo colorBlending = {};
colorBlending.sType =
VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
colorBlending.logicOpEnable = VK_FALSE;
colorBlending.logicOp = VK_LOGIC_OP_COPY;
colorBlending.attachmentCount = 1;
colorBlending.pAttachments = &colorBlendAttachment;
colorBlending.blendConstants[0] = 0.0f;
colorBlending.blendConstants[1] = 0.0f;
colorBlending.blendConstants[2] = 0.0f;
colorBlending.blendConstants[3] = 0.0f;
VkPipelineLayoutCreateInfo pipelineLayoutInfo = {};
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutInfo.setLayoutCount = 1;
pipelineLayoutInfo.pSetLayouts = &descriptorSetLayout;
if (vkCreatePipelineLayout(device, &pipelineLayoutInfo, nullptr,
&pipelineLayout) != VK_SUCCESS) {
throw std::runtime_error("failed to create pipeline layout!");
}
VkGraphicsPipelineCreateInfo pipelineInfo = {};
pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipelineInfo.stageCount = 2;
pipelineInfo.pStages = shaderStages;
pipelineInfo.pVertexInputState = &vertexInputInfo;
pipelineInfo.pInputAssemblyState = &inputAssembly;
pipelineInfo.pViewportState = &viewportState;
pipelineInfo.pRasterizationState = &rasterizer;
pipelineInfo.pMultisampleState = &multisampling;
pipelineInfo.pColorBlendState = &colorBlending;
pipelineInfo.layout = pipelineLayout;
pipelineInfo.renderPass = renderPass;
pipelineInfo.subpass = 0;
pipelineInfo.basePipelineHandle = VK_NULL_HANDLE;
if (vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipelineInfo,
nullptr, &graphicsPipeline) != VK_SUCCESS) {
throw std::runtime_error("failed to create graphics pipeline!");
}
vkDestroyShaderModule(device, fragShaderModule, nullptr);
vkDestroyShaderModule(device, vertShaderModule, nullptr);
}
void createFramebuffers() {
swapChainFramebuffers.resize(swapChainImageViews.size());
for (size_t i = 0; i < swapChainImageViews.size(); i++) {
VkImageView attachments[] = {swapChainImageViews[i]};
VkFramebufferCreateInfo framebufferInfo = {};
framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferInfo.renderPass = renderPass;
framebufferInfo.attachmentCount = 1;
framebufferInfo.pAttachments = attachments;
framebufferInfo.width = swapChainExtent.width;
framebufferInfo.height = swapChainExtent.height;
framebufferInfo.layers = 1;
if (vkCreateFramebuffer(device, &framebufferInfo, nullptr,
&swapChainFramebuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to create framebuffer!");
}
}
}
void createCommandPool() {
QueueFamilyIndices queueFamilyIndices = findQueueFamilies(physicalDevice);
VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.queueFamilyIndex = queueFamilyIndices.graphicsFamily;
if (vkCreateCommandPool(device, &poolInfo, nullptr, &commandPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create graphics command pool!");
}
}
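// Uploads the loaded PPM pixels through a staging buffer into an exportable,
// device-local image. The image is created as R8G8B8A8_UINT with STORAGE
// usage so the CUDA side can later write to it through surface objects.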
void createTextureImage() {
VkDeviceSize imageSize = imageWidth * imageHeight * 4;
mipLevels = static_cast<uint32_t>(
                std::floor(std::log2(std::max(imageWidth, imageHeight)))) +
            1;
printf("mipLevels = %u\n", mipLevels);
if (!image_data) {
throw std::runtime_error("failed to load texture image!");
}
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(imageSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, imageSize, 0, &data);
memcpy(data, image_data, static_cast<size_t>(imageSize));
vkUnmapMemory(device, stagingBufferMemory);
// VK_FORMAT_R8G8B8A8_UNORM changed to VK_FORMAT_R8G8B8A8_UINT
createImage(
imageWidth, imageHeight, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, textureImage, textureImageMemory);
transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
copyBufferToImage(stagingBuffer, textureImage,
static_cast<uint32_t>(imageWidth),
static_cast<uint32_t>(imageHeight));
transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
generateMipmaps(textureImage, VK_FORMAT_R8G8B8A8_UNORM);
}
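// Generates the mip chain on the GPU by repeatedly blitting level i-1 into
// level i with vkCmdBlitImage, inserting the required layout transitions.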
void generateMipmaps(VkImage image, VkFormat imageFormat) {
VkFormatProperties formatProperties;
vkGetPhysicalDeviceFormatProperties(physicalDevice, imageFormat,
&formatProperties);
if (!(formatProperties.optimalTilingFeatures &
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) {
throw std::runtime_error(
"texture image format does not support linear blitting!");
}
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.image = image;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
barrier.subresourceRange.levelCount = 1;
int32_t mipWidth = imageWidth;
int32_t mipHeight = imageHeight;
for (uint32_t i = 1; i < mipLevels; i++) {
barrier.subresourceRange.baseMipLevel = i - 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &barrier);
VkImageBlit blit = {};
blit.srcOffsets[0] = {0, 0, 0};
blit.srcOffsets[1] = {mipWidth, mipHeight, 1};
blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.srcSubresource.mipLevel = i - 1;
blit.srcSubresource.baseArrayLayer = 0;
blit.srcSubresource.layerCount = 1;
blit.dstOffsets[0] = {0, 0, 0};
blit.dstOffsets[1] = {mipWidth > 1 ? mipWidth / 2 : 1,
mipHeight > 1 ? mipHeight / 2 : 1, 1};
blit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.dstSubresource.mipLevel = i;
blit.dstSubresource.baseArrayLayer = 0;
blit.dstSubresource.layerCount = 1;
vkCmdBlitImage(commandBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit,
VK_FILTER_LINEAR);
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
0, nullptr, 1, &barrier);
if (mipWidth > 1) mipWidth /= 2;
if (mipHeight > 1) mipHeight /= 2;
}
barrier.subresourceRange.baseMipLevel = mipLevels - 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
0, nullptr, 1, &barrier);
endSingleTimeCommands(commandBuffer);
}
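// Platform-specific helpers that export the texture memory and the interop
// semaphores as shareable handles (Win32 HANDLEs on Windows, opaque file
// descriptors elsewhere) for import on the CUDA side.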
#ifdef _WIN64 // For windows
HANDLE getVkImageMemHandle(
VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
HANDLE handle;
VkMemoryGetWin32HandleInfoKHR vkMemoryGetWin32HandleInfoKHR = {};
vkMemoryGetWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR;
vkMemoryGetWin32HandleInfoKHR.pNext = NULL;
vkMemoryGetWin32HandleInfoKHR.memory = textureImageMemory;
vkMemoryGetWin32HandleInfoKHR.handleType =
(VkExternalMemoryHandleTypeFlagBitsKHR)externalMemoryHandleType;
fpGetMemoryWin32HandleKHR(device, &vkMemoryGetWin32HandleInfoKHR, &handle);
return handle;
}
HANDLE getVkSemaphoreHandle(
VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
VkSemaphore& semVkCuda) {
HANDLE handle;
VkSemaphoreGetWin32HandleInfoKHR vulkanSemaphoreGetWin32HandleInfoKHR = {};
vulkanSemaphoreGetWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR;
vulkanSemaphoreGetWin32HandleInfoKHR.pNext = NULL;
vulkanSemaphoreGetWin32HandleInfoKHR.semaphore = semVkCuda;
vulkanSemaphoreGetWin32HandleInfoKHR.handleType =
externalSemaphoreHandleType;
fpGetSemaphoreWin32HandleKHR(device, &vulkanSemaphoreGetWin32HandleInfoKHR,
&handle);
return handle;
}
#else
int getVkImageMemHandle(
VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
if (externalMemoryHandleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
int fd;
VkMemoryGetFdInfoKHR vkMemoryGetFdInfoKHR = {};
vkMemoryGetFdInfoKHR.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
vkMemoryGetFdInfoKHR.pNext = NULL;
vkMemoryGetFdInfoKHR.memory = textureImageMemory;
vkMemoryGetFdInfoKHR.handleType =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
fpGetMemoryFdKHR(device, &vkMemoryGetFdInfoKHR, &fd);
return fd;
}
return -1;
}
int getVkSemaphoreHandle(
VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
VkSemaphore& semVkCuda) {
if (externalSemaphoreHandleType ==
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
int fd;
VkSemaphoreGetFdInfoKHR vulkanSemaphoreGetFdInfoKHR = {};
vulkanSemaphoreGetFdInfoKHR.sType =
VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
vulkanSemaphoreGetFdInfoKHR.pNext = NULL;
vulkanSemaphoreGetFdInfoKHR.semaphore = semVkCuda;
vulkanSemaphoreGetFdInfoKHR.handleType =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
fpGetSemaphoreFdKHR(device, &vulkanSemaphoreGetFdInfoKHR, &fd);
return fd;
}
return -1;
}
#endif
void createTextureImageView() {
textureImageView = createImageView(textureImage, VK_FORMAT_R8G8B8A8_UNORM);
}
void createTextureSampler() {
VkSamplerCreateInfo samplerInfo = {};
samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
samplerInfo.magFilter = VK_FILTER_LINEAR;
samplerInfo.minFilter = VK_FILTER_LINEAR;
samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.anisotropyEnable = VK_TRUE;
samplerInfo.maxAnisotropy = 16;
samplerInfo.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
samplerInfo.unnormalizedCoordinates = VK_FALSE;
samplerInfo.compareEnable = VK_FALSE;
samplerInfo.compareOp = VK_COMPARE_OP_ALWAYS;
samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
samplerInfo.minLod = 0; // Optional
samplerInfo.maxLod = static_cast<float>(mipLevels);
samplerInfo.mipLodBias = 0; // Optional
if (vkCreateSampler(device, &samplerInfo, nullptr, &textureSampler) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create texture sampler!");
}
}
VkImageView createImageView(VkImage image, VkFormat format) {
VkImageViewCreateInfo viewInfo = {};
viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewInfo.image = image;
viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
viewInfo.format = format;
viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewInfo.subresourceRange.baseMipLevel = 0;
viewInfo.subresourceRange.levelCount = mipLevels;
viewInfo.subresourceRange.baseArrayLayer = 0;
viewInfo.subresourceRange.layerCount = 1;
VkImageView imageView;
if (vkCreateImageView(device, &viewInfo, nullptr, &imageView) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create texture image view!");
}
return imageView;
}
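// Creates an image whose memory is allocated with VkExportMemoryAllocateInfoKHR
// so it can be exported to CUDA. Note that the allocation and bind below use
// the textureImageMemory member rather than the imageMemory parameter, so this
// helper is effectively specific to the texture image.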
void createImage(uint32_t width, uint32_t height, VkFormat format,
VkImageTiling tiling, VkImageUsageFlags usage,
VkMemoryPropertyFlags properties, VkImage& image,
VkDeviceMemory& imageMemory) {
VkImageCreateInfo imageInfo = {};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.extent.width = width;
imageInfo.extent.height = height;
imageInfo.extent.depth = 1;
imageInfo.mipLevels = mipLevels;
imageInfo.arrayLayers = 1;
imageInfo.format = format;
imageInfo.tiling = tiling;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageInfo.usage = usage;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkExternalMemoryImageCreateInfo vkExternalMemImageCreateInfo = {};
vkExternalMemImageCreateInfo.sType =
VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
vkExternalMemImageCreateInfo.pNext = NULL;
vkExternalMemImageCreateInfo.handleTypes =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
imageInfo.pNext = &vkExternalMemImageCreateInfo;
if (vkCreateImage(device, &imageInfo, nullptr, &image) != VK_SUCCESS) {
throw std::runtime_error("failed to create image!");
}
VkMemoryRequirements memRequirements;
vkGetImageMemoryRequirements(device, image, &memRequirements);
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportMemoryWin32HandleInfoKHR vulkanExportMemoryWin32HandleInfoKHR = {};
vulkanExportMemoryWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR;
vulkanExportMemoryWin32HandleInfoKHR.pNext = NULL;
vulkanExportMemoryWin32HandleInfoKHR.pAttributes = &winSecurityAttributes;
vulkanExportMemoryWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportMemoryWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportMemoryAllocateInfoKHR vulkanExportMemoryAllocateInfoKHR = {};
vulkanExportMemoryAllocateInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
#ifdef _WIN64
vulkanExportMemoryAllocateInfoKHR.pNext =
IsWindows8OrGreater() ? &vulkanExportMemoryWin32HandleInfoKHR : NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes =
IsWindows8OrGreater()
? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
vulkanExportMemoryAllocateInfoKHR.pNext = NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.pNext = &vulkanExportMemoryAllocateInfoKHR;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
VkMemoryRequirements vkMemoryRequirements = {};
vkGetImageMemoryRequirements(device, image, &vkMemoryRequirements);
totalImageMemSize = vkMemoryRequirements.size;
if (vkAllocateMemory(device, &allocInfo, nullptr, &textureImageMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate image memory!");
}
vkBindImageMemory(device, image, textureImageMemory, 0);
}
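// Imports the two exportable Vulkan semaphores (CUDA->Vulkan and Vulkan->CUDA)
// into CUDA external-semaphore handles used to order work between the graphics
// queue and the CUDA stream.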
void cudaVkImportSemaphore() {
cudaExternalSemaphoreHandleDesc externalSemaphoreHandleDesc;
memset(&externalSemaphoreHandleDesc, 0,
sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
externalSemaphoreHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
: cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
cudaUpdateVkSemaphore);
#else
externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
externalSemaphoreHandleDesc.handle.fd = getVkSemaphoreHandle(
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, cudaUpdateVkSemaphore);
#endif
externalSemaphoreHandleDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(&cudaExtCudaUpdateVkSemaphore,
&externalSemaphoreHandleDesc));
memset(&externalSemaphoreHandleDesc, 0,
sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
externalSemaphoreHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
: cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
vkUpdateCudaSemaphore);
#else
externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
externalSemaphoreHandleDesc.handle.fd = getVkSemaphoreHandle(
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, vkUpdateCudaSemaphore);
#endif
externalSemaphoreHandleDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(&cudaExtVkUpdateCudaSemaphore,
&externalSemaphoreHandleDesc));
printf("CUDA Imported Vulkan semaphore\n");
}
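// Imports the exported Vulkan image memory into CUDA as a mipmapped array,
// keeps an unfiltered copy of every level, and creates per-level surface
// objects (for writing) plus a mipmapped texture object (for reading).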
void cudaVkImportImageMem() {
cudaExternalMemoryHandleDesc cudaExtMemHandleDesc;
memset(&cudaExtMemHandleDesc, 0, sizeof(cudaExtMemHandleDesc));
#ifdef _WIN64
cudaExtMemHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalMemoryHandleTypeOpaqueWin32
: cudaExternalMemoryHandleTypeOpaqueWin32Kmt;
cudaExtMemHandleDesc.handle.win32.handle = getVkImageMemHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT);
#else
cudaExtMemHandleDesc.type = cudaExternalMemoryHandleTypeOpaqueFd;
cudaExtMemHandleDesc.handle.fd =
getVkImageMemHandle(VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
#endif
cudaExtMemHandleDesc.size = totalImageMemSize;
checkCudaErrors(cudaImportExternalMemory(&cudaExtMemImageBuffer,
&cudaExtMemHandleDesc));
cudaExternalMemoryMipmappedArrayDesc externalMemoryMipmappedArrayDesc;
memset(&externalMemoryMipmappedArrayDesc, 0,
sizeof(externalMemoryMipmappedArrayDesc));
hipExtent extent = make_hipExtent(imageWidth, imageHeight, 0);
hipChannelFormatDesc formatDesc;
formatDesc.x = 8;
formatDesc.y = 8;
formatDesc.z = 8;
formatDesc.w = 8;
formatDesc.f = hipChannelFormatKindUnsigned;
externalMemoryMipmappedArrayDesc.offset = 0;
externalMemoryMipmappedArrayDesc.formatDesc = formatDesc;
externalMemoryMipmappedArrayDesc.extent = extent;
externalMemoryMipmappedArrayDesc.flags = 0;
externalMemoryMipmappedArrayDesc.numLevels = mipLevels;
checkCudaErrors(cudaExternalMemoryGetMappedMipmappedArray(
&cudaMipmappedImageArray, cudaExtMemImageBuffer,
&externalMemoryMipmappedArrayDesc));
checkCudaErrors(hipMallocMipmappedArray(&cudaMipmappedImageArrayTemp,
&formatDesc, extent, mipLevels));
checkCudaErrors(hipMallocMipmappedArray(&cudaMipmappedImageArrayOrig,
&formatDesc, extent, mipLevels));
for (int mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
hipArray_t cudaMipLevelArray, cudaMipLevelArrayTemp,
cudaMipLevelArrayOrig;
hipResourceDesc resourceDesc;
checkCudaErrors(hipGetMipmappedArrayLevel(
&cudaMipLevelArray, cudaMipmappedImageArray, mipLevelIdx));
checkCudaErrors(hipGetMipmappedArrayLevel(
&cudaMipLevelArrayTemp, cudaMipmappedImageArrayTemp, mipLevelIdx));
checkCudaErrors(hipGetMipmappedArrayLevel(
&cudaMipLevelArrayOrig, cudaMipmappedImageArrayOrig, mipLevelIdx));
uint32_t width =
(imageWidth >> mipLevelIdx) ? (imageWidth >> mipLevelIdx) : 1;
uint32_t height =
(imageHeight >> mipLevelIdx) ? (imageHeight >> mipLevelIdx) : 1;
checkCudaErrors(hipMemcpy2DArrayToArray(
cudaMipLevelArrayOrig, 0, 0, cudaMipLevelArray, 0, 0,
width * sizeof(uchar4), height, hipMemcpyDeviceToDevice));
memset(&resourceDesc, 0, sizeof(resourceDesc));
resourceDesc.resType = hipResourceTypeArray;
resourceDesc.res.array.array = cudaMipLevelArray;
hipSurfaceObject_t surfaceObject;
checkCudaErrors(hipCreateSurfaceObject(&surfaceObject, &resourceDesc));
surfaceObjectList.push_back(surfaceObject);
memset(&resourceDesc, 0, sizeof(resourceDesc));
resourceDesc.resType = hipResourceTypeArray;
resourceDesc.res.array.array = cudaMipLevelArrayTemp;
hipSurfaceObject_t surfaceObjectTemp;
checkCudaErrors(
hipCreateSurfaceObject(&surfaceObjectTemp, &resourceDesc));
surfaceObjectListTemp.push_back(surfaceObjectTemp);
}
hipResourceDesc resDescr;
memset(&resDescr, 0, sizeof(hipResourceDesc));
resDescr.resType = hipResourceTypeMipmappedArray;
resDescr.res.mipmap.mipmap = cudaMipmappedImageArrayOrig;
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = true;
texDescr.filterMode = hipFilterModeLinear;
texDescr.mipmapFilterMode = hipFilterModeLinear;
texDescr.addressMode[0] = hipAddressModeWrap;
texDescr.addressMode[1] = hipAddressModeWrap;
texDescr.maxMipmapLevelClamp = float(mipLevels - 1);
texDescr.readMode = hipReadModeNormalizedFloat;
checkCudaErrors(hipCreateTextureObject(&textureObjMipMapInput, &resDescr,
&texDescr, NULL));
checkCudaErrors(hipMalloc((void**)&d_surfaceObjectList,
sizeof(hipSurfaceObject_t) * mipLevels));
checkCudaErrors(hipMalloc((void**)&d_surfaceObjectListTemp,
sizeof(hipSurfaceObject_t) * mipLevels));
checkCudaErrors(hipMemcpy(d_surfaceObjectList, surfaceObjectList.data(),
sizeof(hipSurfaceObject_t) * mipLevels,
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(
d_surfaceObjectListTemp, surfaceObjectListTemp.data(),
sizeof(hipSurfaceObject_t) * mipLevels, hipMemcpyHostToDevice));
printf("CUDA Kernel Vulkan image buffer\n");
}
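// One CUDA pass per frame: wait on the Vulkan-signalled semaphore, run the
// separable box filter (x pass then y pass) over all mip levels, then signal
// Vulkan. The launch configuration appears to assume imageWidth and
// imageHeight are multiples of nthreads (128).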
void cudaUpdateVkImage() {
cudaVkSemaphoreWait(cudaExtVkUpdateCudaSemaphore);
int nthreads = 128;
/*Perform 2D box filter on image using CUDA */
hipLaunchKernelGGL(( d_boxfilter_rgba_x), dim3(imageHeight / nthreads), dim3(nthreads), 0, streamToRun,
d_surfaceObjectListTemp, textureObjMipMapInput, imageWidth, imageHeight,
mipLevels, filter_radius);
hipLaunchKernelGGL(( d_boxfilter_rgba_y), dim3(imageWidth / nthreads), dim3(nthreads), 0, streamToRun,
d_surfaceObjectList, d_surfaceObjectListTemp, imageWidth, imageHeight,
mipLevels, filter_radius);
varySigma();
cudaVkSemaphoreSignal(cudaExtCudaUpdateVkSemaphore);
}
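// Records a pipeline barrier that transitions all mip levels of the image
// between the two layout pairs this sample needs (UNDEFINED -> TRANSFER_DST
// and TRANSFER_DST -> SHADER_READ_ONLY).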
void transitionImageLayout(VkImage image, VkFormat format,
VkImageLayout oldLayout, VkImageLayout newLayout) {
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = oldLayout;
barrier.newLayout = newLayout;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = mipLevels;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
VkPipelineStageFlags sourceStage;
VkPipelineStageFlags destinationStage;
if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED &&
newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
} else if (oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
destinationStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
} else {
throw std::invalid_argument("unsupported layout transition!");
}
vkCmdPipelineBarrier(commandBuffer, sourceStage, destinationStage, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
endSingleTimeCommands(commandBuffer);
}
void copyBufferToImage(VkBuffer buffer, VkImage image, uint32_t width,
uint32_t height) {
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkBufferImageCopy region = {};
region.bufferOffset = 0;
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.mipLevel = 0;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageOffset = {0, 0, 0};
region.imageExtent = {width, height, 1};
vkCmdCopyBufferToImage(commandBuffer, buffer, image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
endSingleTimeCommands(commandBuffer);
}
void createVertexBuffer() {
VkDeviceSize bufferSize = sizeof(vertices[0]) * vertices.size();
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, bufferSize, 0, &data);
memcpy(data, vertices.data(), (size_t)bufferSize);
vkUnmapMemory(device, stagingBufferMemory);
createBuffer(
bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, vertexBuffer, vertexBufferMemory);
copyBuffer(stagingBuffer, vertexBuffer, bufferSize);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
}
void createIndexBuffer() {
VkDeviceSize bufferSize = sizeof(indices[0]) * indices.size();
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, bufferSize, 0, &data);
memcpy(data, indices.data(), (size_t)bufferSize);
vkUnmapMemory(device, stagingBufferMemory);
createBuffer(
bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, indexBuffer, indexBufferMemory);
copyBuffer(stagingBuffer, indexBuffer, bufferSize);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
}
void createUniformBuffers() {
VkDeviceSize bufferSize = sizeof(UniformBufferObject);
uniformBuffers.resize(swapChainImages.size());
uniformBuffersMemory.resize(swapChainImages.size());
for (size_t i = 0; i < swapChainImages.size(); i++) {
createBuffer(bufferSize, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
uniformBuffers[i], uniformBuffersMemory[i]);
}
}
void createDescriptorPool() {
std::array<VkDescriptorPoolSize, 2> poolSizes = {};
poolSizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
poolSizes[0].descriptorCount =
static_cast<uint32_t>(swapChainImages.size());
poolSizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
poolSizes[1].descriptorCount =
static_cast<uint32_t>(swapChainImages.size());
VkDescriptorPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
poolInfo.poolSizeCount = static_cast<uint32_t>(poolSizes.size());
poolInfo.pPoolSizes = poolSizes.data();
poolInfo.maxSets = static_cast<uint32_t>(swapChainImages.size());
if (vkCreateDescriptorPool(device, &poolInfo, nullptr, &descriptorPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor pool!");
}
}
void createDescriptorSets() {
std::vector<VkDescriptorSetLayout> layouts(swapChainImages.size(),
descriptorSetLayout);
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = descriptorPool;
allocInfo.descriptorSetCount =
static_cast<uint32_t>(swapChainImages.size());
allocInfo.pSetLayouts = layouts.data();
descriptorSets.resize(swapChainImages.size());
if (vkAllocateDescriptorSets(device, &allocInfo, descriptorSets.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate descriptor sets!");
}
for (size_t i = 0; i < swapChainImages.size(); i++) {
VkDescriptorBufferInfo bufferInfo = {};
bufferInfo.buffer = uniformBuffers[i];
bufferInfo.offset = 0;
bufferInfo.range = sizeof(UniformBufferObject);
VkDescriptorImageInfo imageInfo = {};
imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imageInfo.imageView = textureImageView;
imageInfo.sampler = textureSampler;
std::array<VkWriteDescriptorSet, 2> descriptorWrites = {};
descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[0].dstSet = descriptorSets[i];
descriptorWrites[0].dstBinding = 0;
descriptorWrites[0].dstArrayElement = 0;
descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrites[0].descriptorCount = 1;
descriptorWrites[0].pBufferInfo = &bufferInfo;
descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[1].dstSet = descriptorSets[i];
descriptorWrites[1].dstBinding = 1;
descriptorWrites[1].dstArrayElement = 0;
descriptorWrites[1].descriptorType =
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptorWrites[1].descriptorCount = 1;
descriptorWrites[1].pImageInfo = &imageInfo;
vkUpdateDescriptorSets(device,
static_cast<uint32_t>(descriptorWrites.size()),
descriptorWrites.data(), 0, nullptr);
}
}
void createBuffer(VkDeviceSize size, VkBufferUsageFlags usage,
VkMemoryPropertyFlags properties, VkBuffer& buffer,
VkDeviceMemory& bufferMemory) {
VkBufferCreateInfo bufferInfo = {};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.size = size;
bufferInfo.usage = usage;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
throw std::runtime_error("failed to create buffer!");
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
if (vkAllocateMemory(device, &allocInfo, nullptr, &bufferMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate buffer memory!");
}
vkBindBufferMemory(device, buffer, bufferMemory, 0);
}
VkCommandBuffer beginSingleTimeCommands() {
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandPool = commandPool;
allocInfo.commandBufferCount = 1;
VkCommandBuffer commandBuffer;
vkAllocateCommandBuffers(device, &allocInfo, &commandBuffer);
VkCommandBufferBeginInfo beginInfo = {};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(commandBuffer, &beginInfo);
return commandBuffer;
}
void endSingleTimeCommands(VkCommandBuffer commandBuffer) {
vkEndCommandBuffer(commandBuffer);
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffer;
vkQueueSubmit(graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
vkQueueWaitIdle(graphicsQueue);
vkFreeCommandBuffers(device, commandPool, 1, &commandBuffer);
}
void copyBuffer(VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size) {
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkBufferCopy copyRegion = {};
copyRegion.size = size;
vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, 1, ©Region);
endSingleTimeCommands(commandBuffer);
}
uint32_t findMemoryType(uint32_t typeFilter,
VkMemoryPropertyFlags properties) {
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) {
if ((typeFilter & (1 << i)) &&
(memProperties.memoryTypes[i].propertyFlags & properties) ==
properties) {
return i;
}
}
throw std::runtime_error("failed to find suitable memory type!");
}
void createCommandBuffers() {
commandBuffers.resize(swapChainFramebuffers.size());
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.commandPool = commandPool;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandBufferCount = (uint32_t)commandBuffers.size();
if (vkAllocateCommandBuffers(device, &allocInfo, commandBuffers.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate command buffers!");
}
for (size_t i = 0; i < commandBuffers.size(); i++) {
VkCommandBufferBeginInfo beginInfo = {};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
if (vkBeginCommandBuffer(commandBuffers[i], &beginInfo) != VK_SUCCESS) {
throw std::runtime_error("failed to begin recording command buffer!");
}
VkRenderPassBeginInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = renderPass;
renderPassInfo.framebuffer = swapChainFramebuffers[i];
renderPassInfo.renderArea.offset = {0, 0};
renderPassInfo.renderArea.extent = swapChainExtent;
VkClearValue clearColor = {0.0f, 0.0f, 0.0f, 1.0f};
renderPassInfo.clearValueCount = 1;
renderPassInfo.pClearValues = &clearColor;
vkCmdBeginRenderPass(commandBuffers[i], &renderPassInfo,
VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS,
graphicsPipeline);
VkBuffer vertexBuffers[] = {vertexBuffer};
VkDeviceSize offsets[] = {0};
vkCmdBindVertexBuffers(commandBuffers[i], 0, 1, vertexBuffers, offsets);
vkCmdBindIndexBuffer(commandBuffers[i], indexBuffer, 0,
VK_INDEX_TYPE_UINT16);
vkCmdBindDescriptorSets(commandBuffers[i],
VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
0, 1, &descriptorSets[i], 0, nullptr);
vkCmdDrawIndexed(commandBuffers[i], static_cast<uint32_t>(indices.size()),
1, 0, 0, 0);
// vkCmdDraw(commandBuffers[i], static_cast<uint32_t>(vertices.size()), 1,
// 0, 0);
vkCmdEndRenderPass(commandBuffers[i]);
if (vkEndCommandBuffer(commandBuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to record command buffer!");
}
}
}
void createSyncObjects() {
imageAvailableSemaphores.resize(MAX_FRAMES);
renderFinishedSemaphores.resize(MAX_FRAMES);
inFlightFences.resize(MAX_FRAMES);
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
for (size_t i = 0; i < MAX_FRAMES; i++) {
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&imageAvailableSemaphores[i]) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&renderFinishedSemaphores[i]) != VK_SUCCESS ||
vkCreateFence(device, &fenceInfo, nullptr, &inFlightFences[i]) !=
VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a frame!");
}
}
}
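// Creates the two interop semaphores with VkExportSemaphoreCreateInfoKHR so
// their handles can be exported and imported into CUDA.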
void createSyncObjectsExt() {
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportSemaphoreWin32HandleInfoKHR
vulkanExportSemaphoreWin32HandleInfoKHR = {};
vulkanExportSemaphoreWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
vulkanExportSemaphoreWin32HandleInfoKHR.pNext = NULL;
vulkanExportSemaphoreWin32HandleInfoKHR.pAttributes =
&winSecurityAttributes;
vulkanExportSemaphoreWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportSemaphoreWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportSemaphoreCreateInfoKHR vulkanExportSemaphoreCreateInfo = {};
vulkanExportSemaphoreCreateInfo.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
#ifdef _WIN64
vulkanExportSemaphoreCreateInfo.pNext =
IsWindows8OrGreater() ? &vulkanExportSemaphoreWin32HandleInfoKHR : NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
vulkanExportSemaphoreCreateInfo.pNext = NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
#endif
semaphoreInfo.pNext = &vulkanExportSemaphoreCreateInfo;
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&cudaUpdateVkSemaphore) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&vkUpdateCudaSemaphore) != VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a CUDA-Vulkan!");
}
}
void updateUniformBuffer() {
UniformBufferObject ubo = {};
mat4x4_identity(ubo.model);
mat4x4 Model;
mat4x4_dup(Model, ubo.model);
mat4x4_rotate(ubo.model, Model, 0.0f, 0.0f, 1.0f, degreesToRadians(135.0f));
vec3 eye = {2.0f, 2.0f, 2.0f};
vec3 center = {0.0f, 0.0f, 0.0f};
vec3 up = {0.0f, 0.0f, 1.0f};
mat4x4_look_at(ubo.view, eye, center, up);
mat4x4_perspective(ubo.proj, degreesToRadians(45.0f),
swapChainExtent.width / (float)swapChainExtent.height,
0.1f, 10.0f);
ubo.proj[1][1] *= -1;
for (size_t i = 0; i < swapChainImages.size(); i++) {
void* data;
vkMapMemory(device, uniformBuffersMemory[i], 0, sizeof(ubo), 0, &data);
memcpy(data, &ubo, sizeof(ubo));
vkUnmapMemory(device, uniformBuffersMemory[i]);
}
}
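// Per-frame loop: acquire a swapchain image, submit rendering (waiting on the
// CUDA-update semaphore after the first frame), present, then run the CUDA
// filter for the next frame.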
void drawFrame() {
static int startSubmit = 0;
vkWaitForFences(device, 1, &inFlightFences[currentFrame], VK_TRUE,
std::numeric_limits<uint64_t>::max());
uint32_t imageIndex;
VkResult result = vkAcquireNextImageKHR(
device, swapChain, std::numeric_limits<uint64_t>::max(),
imageAvailableSemaphores[currentFrame], VK_NULL_HANDLE, &imageIndex);
if (result == VK_ERROR_OUT_OF_DATE_KHR) {
recreateSwapChain();
return;
} else if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
throw std::runtime_error("failed to acquire swap chain image!");
}
vkResetFences(device, 1, &inFlightFences[currentFrame]);
if (!startSubmit) {
submitVulkan(imageIndex);
startSubmit = 1;
} else {
submitVulkanCuda(imageIndex);
}
VkPresentInfoKHR presentInfo = {};
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame]};
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = signalSemaphores;
VkSwapchainKHR swapChains[] = {swapChain};
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = swapChains;
presentInfo.pImageIndices = &imageIndex;
presentInfo.pResults = nullptr; // Optional
result = vkQueuePresentKHR(presentQueue, &presentInfo);
if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR ||
framebufferResized) {
framebufferResized = false;
recreateSwapChain();
} else if (result != VK_SUCCESS) {
throw std::runtime_error("failed to present swap chain image!");
}
cudaUpdateVkImage();
currentFrame = (currentFrame + 1) % MAX_FRAMES;
// Added sleep of 10 millisecs so that CPU does not submit too much work to
// GPU
std::this_thread::sleep_for(std::chrono::microseconds(10000));
char title[256];
sprintf(title, "Vulkan Image CUDA Box Filter (radius=%d)", filter_radius);
glfwSetWindowTitle(window, title);
}
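// Thin wrappers around cudaSignalExternalSemaphoresAsync /
// cudaWaitExternalSemaphoresAsync on the interop stream.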
void cudaVkSemaphoreSignal(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreSignalParams extSemaphoreSignalParams;
memset(&extSemaphoreSignalParams, 0, sizeof(extSemaphoreSignalParams));
extSemaphoreSignalParams.params.fence.value = 0;
extSemaphoreSignalParams.flags = 0;
checkCudaErrors(cudaSignalExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreSignalParams, 1, streamToRun));
}
void cudaVkSemaphoreWait(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreWaitParams extSemaphoreWaitParams;
memset(&extSemaphoreWaitParams, 0, sizeof(extSemaphoreWaitParams));
extSemaphoreWaitParams.params.fence.value = 0;
extSemaphoreWaitParams.flags = 0;
checkCudaErrors(cudaWaitExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreWaitParams, 1, streamToRun));
}
void submitVulkan(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphores[currentFrame]};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame],
vkUpdateCudaSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, inFlightFences[currentFrame]) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
void submitVulkanCuda(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphores[currentFrame],
cudaUpdateVkSemaphore};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
submitInfo.waitSemaphoreCount = 2;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame],
vkUpdateCudaSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, inFlightFences[currentFrame]) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
VkShaderModule createShaderModule(const std::vector<char>& code) {
VkShaderModuleCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
createInfo.codeSize = code.size();
createInfo.pCode = reinterpret_cast<const uint32_t*>(code.data());
VkShaderModule shaderModule;
if (vkCreateShaderModule(device, &createInfo, nullptr, &shaderModule) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create shader module!");
}
return shaderModule;
}
VkSurfaceFormatKHR chooseSwapSurfaceFormat(
const std::vector<VkSurfaceFormatKHR>& availableFormats) {
if (availableFormats.size() == 1 &&
availableFormats[0].format == VK_FORMAT_UNDEFINED) {
return {VK_FORMAT_B8G8R8A8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR};
}
for (const auto& availableFormat : availableFormats) {
if (availableFormat.format == VK_FORMAT_B8G8R8A8_UNORM &&
availableFormat.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
return availableFormat;
}
}
return availableFormats[0];
}
VkPresentModeKHR chooseSwapPresentMode(
const std::vector<VkPresentModeKHR>& availablePresentModes) {
VkPresentModeKHR bestMode = VK_PRESENT_MODE_FIFO_KHR;
for (const auto& availablePresentMode : availablePresentModes) {
if (availablePresentMode == VK_PRESENT_MODE_MAILBOX_KHR) {
return availablePresentMode;
} else if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
bestMode = availablePresentMode;
}
}
return bestMode;
}
VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities) {
if (capabilities.currentExtent.width !=
std::numeric_limits<uint32_t>::max()) {
return capabilities.currentExtent;
} else {
int width, height;
glfwGetFramebufferSize(window, &width, &height);
VkExtent2D actualExtent = {static_cast<uint32_t>(width),
static_cast<uint32_t>(height)};
      actualExtent.width = std::max(
          capabilities.minImageExtent.width,
          std::min(capabilities.maxImageExtent.width, actualExtent.width));
      actualExtent.height = std::max(
          capabilities.minImageExtent.height,
          std::min(capabilities.maxImageExtent.height, actualExtent.height));
return actualExtent;
}
}
SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device) {
SwapChainSupportDetails details;
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface,
&details.capabilities);
uint32_t formatCount;
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
nullptr);
if (formatCount != 0) {
details.formats.resize(formatCount);
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
details.formats.data());
}
uint32_t presentModeCount;
vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface,
&presentModeCount, nullptr);
if (presentModeCount != 0) {
details.presentModes.resize(presentModeCount);
vkGetPhysicalDeviceSurfacePresentModesKHR(
device, surface, &presentModeCount, details.presentModes.data());
}
return details;
}
bool isDeviceSuitable(VkPhysicalDevice device) {
QueueFamilyIndices indices = findQueueFamilies(device);
bool extensionsSupported = checkDeviceExtensionSupport(device);
bool swapChainAdequate = false;
if (extensionsSupported) {
SwapChainSupportDetails swapChainSupport = querySwapChainSupport(device);
swapChainAdequate = !swapChainSupport.formats.empty() &&
!swapChainSupport.presentModes.empty();
}
VkPhysicalDeviceFeatures supportedFeatures;
vkGetPhysicalDeviceFeatures(device, &supportedFeatures);
return indices.isComplete() && extensionsSupported && swapChainAdequate &&
supportedFeatures.samplerAnisotropy;
}
bool checkDeviceExtensionSupport(VkPhysicalDevice device) {
uint32_t extensionCount;
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
nullptr);
std::vector<VkExtensionProperties> availableExtensions(extensionCount);
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
availableExtensions.data());
std::set<std::string> requiredExtensions(deviceExtensions.begin(),
deviceExtensions.end());
for (const auto& extension : availableExtensions) {
requiredExtensions.erase(extension.extensionName);
}
return requiredExtensions.empty();
}
QueueFamilyIndices findQueueFamilies(VkPhysicalDevice device) {
QueueFamilyIndices indices;
uint32_t queueFamilyCount = 0;
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
nullptr);
std::vector<VkQueueFamilyProperties> queueFamilies(queueFamilyCount);
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
queueFamilies.data());
int i = 0;
for (const auto& queueFamily : queueFamilies) {
if (queueFamily.queueCount > 0 &&
queueFamily.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
indices.graphicsFamily = i;
}
VkBool32 presentSupport = false;
vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &presentSupport);
if (queueFamily.queueCount > 0 && presentSupport) {
indices.presentFamily = i;
}
if (indices.isComplete()) {
break;
}
i++;
}
return indices;
}
std::vector<const char*> getRequiredExtensions() {
uint32_t glfwExtensionCount = 0;
const char** glfwExtensions;
glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);
std::vector<const char*> extensions(glfwExtensions,
glfwExtensions + glfwExtensionCount);
if (enableValidationLayers) {
extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
}
return extensions;
}
bool checkValidationLayerSupport() {
uint32_t layerCount;
vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
std::vector<VkLayerProperties> availableLayers(layerCount);
vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data());
for (const char* layerName : validationLayers) {
bool layerFound = false;
for (const auto& layerProperties : availableLayers) {
if (strcmp(layerName, layerProperties.layerName) == 0) {
layerFound = true;
break;
}
}
if (!layerFound) {
return false;
}
}
return true;
}
static std::vector<char> readFile(const std::string& filename) {
char* file_path = sdkFindFilePath(filename.c_str(), execution_path.c_str());
std::ifstream file(file_path, std::ios::ate | std::ios::binary);
if (!file.is_open()) {
throw std::runtime_error("failed to open file!");
}
size_t fileSize = (size_t)file.tellg();
std::vector<char> buffer(fileSize);
file.seekg(0);
file.read(buffer.data(), fileSize);
file.close();
return buffer;
}
static VKAPI_ATTR VkBool32 VKAPI_CALL
debugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageType,
const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
void* pUserData) {
std::cerr << "validation layer: " << pCallbackData->pMessage << std::endl;
return VK_FALSE;
}
};
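// Entry point: loads a PPM image (selectable via the "file" command-line
// argument, default lenaRGB.ppm) and runs the Vulkan/CUDA box-filter app.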
int main(int argc, char** argv) {
execution_path = argv[0];
std::string image_filename = "lenaRGB.ppm";
if (checkCmdLineFlag(argc, (const char**)argv, "file")) {
getCmdLineArgumentString(argc, (const char**)argv, "file",
(char**)&image_filename);
}
vulkanImageCUDA app;
try {
// This app only works on ppm images
app.loadImageData(image_filename);
app.run();
} catch (const std::exception& e) {
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
| 69439bbbffd4c530c60aef0bff5bffe3986ba304.cu | /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define GLFW_INCLUDE_VULKAN
#ifdef _WIN64
#include <aclapi.h>
#include <dxgi1_2.h>
#include <windows.h>
#include <VersionHelpers.h>
#define _USE_MATH_DEFINES
#endif
#include <GLFW/glfw3.h>
#include <vulkan/vulkan.h>
#ifdef _WIN64
#include <vulkan/vulkan_win32.h>
#endif
#include <algorithm>
#include <array>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <set>
#include <stdexcept>
#include <thread>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include <helper_math.h>
#include "linmath.h"
#define WIDTH 800
#define HEIGHT 600
const int MAX_FRAMES = 4;
const std::vector<const char*> validationLayers = {
"VK_LAYER_KHRONOS_validation"};
#ifdef NDEBUG
const bool enableValidationLayers = false;
#else
// Note: validation layers are disabled even in debug builds; set this to true
// to enable VK_LAYER_KHRONOS_validation during development.
const bool enableValidationLayers = false;
#endif
std::string execution_path;
VkResult CreateDebugUtilsMessengerEXT(
VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDebugUtilsMessengerEXT* pDebugMessenger) {
auto func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
instance, "vkCreateDebugUtilsMessengerEXT");
if (func != nullptr) {
return func(instance, pCreateInfo, pAllocator, pDebugMessenger);
} else {
return VK_ERROR_EXTENSION_NOT_PRESENT;
}
};
const std::vector<const char*> deviceExtensions = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
#ifdef _WIN64
VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME,
#else
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
#endif
};
#ifdef _WIN64
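// Builds a SECURITY_ATTRIBUTES descriptor granting wide access so the Win32
// handles exported for external memory and semaphores can be shared with CUDA.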
class WindowsSecurityAttributes {
protected:
SECURITY_ATTRIBUTES m_winSecurityAttributes;
PSECURITY_DESCRIPTOR m_winPSecurityDescriptor;
public:
WindowsSecurityAttributes();
SECURITY_ATTRIBUTES* operator&();
~WindowsSecurityAttributes();
};
WindowsSecurityAttributes::WindowsSecurityAttributes() {
m_winPSecurityDescriptor = (PSECURITY_DESCRIPTOR)calloc(
1, SECURITY_DESCRIPTOR_MIN_LENGTH + 2 * sizeof(void**));
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
InitializeSecurityDescriptor(m_winPSecurityDescriptor,
SECURITY_DESCRIPTOR_REVISION);
SID_IDENTIFIER_AUTHORITY sidIdentifierAuthority =
SECURITY_WORLD_SID_AUTHORITY;
AllocateAndInitializeSid(&sidIdentifierAuthority, 1, SECURITY_WORLD_RID, 0, 0,
0, 0, 0, 0, 0, ppSID);
EXPLICIT_ACCESS explicitAccess;
ZeroMemory(&explicitAccess, sizeof(EXPLICIT_ACCESS));
explicitAccess.grfAccessPermissions =
STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL;
explicitAccess.grfAccessMode = SET_ACCESS;
explicitAccess.grfInheritance = INHERIT_ONLY;
explicitAccess.Trustee.TrusteeForm = TRUSTEE_IS_SID;
explicitAccess.Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP;
explicitAccess.Trustee.ptstrName = (LPTSTR)*ppSID;
SetEntriesInAcl(1, &explicitAccess, NULL, ppACL);
SetSecurityDescriptorDacl(m_winPSecurityDescriptor, TRUE, *ppACL, FALSE);
m_winSecurityAttributes.nLength = sizeof(m_winSecurityAttributes);
m_winSecurityAttributes.lpSecurityDescriptor = m_winPSecurityDescriptor;
m_winSecurityAttributes.bInheritHandle = TRUE;
}
SECURITY_ATTRIBUTES* WindowsSecurityAttributes::operator&() {
return &m_winSecurityAttributes;
}
WindowsSecurityAttributes::~WindowsSecurityAttributes() {
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
if (*ppSID) {
FreeSid(*ppSID);
}
if (*ppACL) {
LocalFree(*ppACL);
}
free(m_winPSecurityDescriptor);
}
#endif
void DestroyDebugUtilsMessengerEXT(VkInstance instance,
VkDebugUtilsMessengerEXT debugMessenger,
const VkAllocationCallbacks* pAllocator) {
auto func = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
instance, "vkDestroyDebugUtilsMessengerEXT");
if (func != nullptr) {
func(instance, debugMessenger, pAllocator);
}
}
struct QueueFamilyIndices {
int graphicsFamily = -1;
int presentFamily = -1;
bool isComplete() { return graphicsFamily >= 0 && presentFamily >= 0; }
};
struct SwapChainSupportDetails {
VkSurfaceCapabilitiesKHR capabilities;
std::vector<VkSurfaceFormatKHR> formats;
std::vector<VkPresentModeKHR> presentModes;
};
typedef float vec2[2];
struct Vertex {
vec4 pos;
vec3 color;
vec2 texCoord;
static VkVertexInputBindingDescription getBindingDescription() {
VkVertexInputBindingDescription bindingDescription = {};
bindingDescription.binding = 0;
bindingDescription.stride = sizeof(Vertex);
bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
return bindingDescription;
}
static std::array<VkVertexInputAttributeDescription, 3>
getAttributeDescriptions() {
std::array<VkVertexInputAttributeDescription, 3> attributeDescriptions = {};
attributeDescriptions[0].binding = 0;
attributeDescriptions[0].location = 0;
attributeDescriptions[0].format = VK_FORMAT_R32G32B32A32_SFLOAT;
attributeDescriptions[0].offset = offsetof(Vertex, pos);
attributeDescriptions[1].binding = 0;
attributeDescriptions[1].location = 1;
attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT;
attributeDescriptions[1].offset = offsetof(Vertex, color);
attributeDescriptions[2].binding = 0;
attributeDescriptions[2].location = 2;
attributeDescriptions[2].format = VK_FORMAT_R32G32_SFLOAT;
attributeDescriptions[2].offset = offsetof(Vertex, texCoord);
return attributeDescriptions;
}
};
struct UniformBufferObject {
alignas(16) mat4x4 model;
alignas(16) mat4x4 view;
alignas(16) mat4x4 proj;
};
const std::vector<Vertex> vertices = {
{{-1.0f, -1.0f, 0.0f, 1.0f}, {1.0f, 0.0f, 0.0f}, {0.0f, 0.0f}},
{{1.0f, -1.0f, 0.0f, 1.0f}, {0.0f, 1.0f, 0.0f}, {1.0f, 0.0f}},
{{1.0f, 1.0f, 0.0f, 1.0f}, {0.0f, 0.0f, 1.0f}, {1.0f, 1.0f}},
{{-1.0f, 1.0f, 0.0f, 1.0f}, {1.0f, 1.0f, 1.0f}, {0.0f, 1.0f}}};
const std::vector<uint16_t> indices = {0, 1, 2, 2, 3, 0};
// convert floating point rgba color to 32-bit integer
__device__ unsigned int rgbaFloatToInt(float4 rgba) {
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return ((unsigned int)(rgba.w * 255.0f) << 24) |
((unsigned int)(rgba.z * 255.0f) << 16) |
((unsigned int)(rgba.y * 255.0f) << 8) |
((unsigned int)(rgba.x * 255.0f));
}
__device__ float4 rgbaIntToFloat(unsigned int c) {
float4 rgba;
rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f;
rgba.y = ((c >> 8) & 0xff) * 0.003921568627f; // /255.0f;
rgba.z = ((c >> 16) & 0xff) * 0.003921568627f; // /255.0f;
rgba.w = ((c >> 24) & 0xff) * 0.003921568627f; // /255.0f;
return rgba;
}
int filter_radius = 14;
int g_nFilterSign = 1;
// Varies the filter radius between 0 and 64, one step per frame, so the blur
// animates automatically.
void varySigma() {
filter_radius += g_nFilterSign;
if (filter_radius > 64) {
filter_radius = 64; // clamp to 64 and then negate sign
g_nFilterSign = -1;
} else if (filter_radius < 0) {
filter_radius = 0;
g_nFilterSign = 1;
}
}
// row pass using texture lookups
__global__ void d_boxfilter_rgba_x(cudaSurfaceObject_t* dstSurfMipMapArray,
cudaTextureObject_t textureMipMapInput,
size_t baseWidth, size_t baseHeight,
size_t mipLevels, int filter_radius) {
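// One thread per image row: for each mip level, seed a running sum over the
// (2*filter_radius + 1) texels centred on x = 0 (the wrap address mode covers
// the out-of-range reads), then slide the window across the row, adding the
// entering texel and subtracting the leaving one, and store the averaged
// pixel through the destination surface.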
float scale = 1.0f / (float)((filter_radius << 1) + 1);
unsigned int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y < baseHeight) {
for (uint32_t mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
uint32_t width =
(baseWidth >> mipLevelIdx) ? (baseWidth >> mipLevelIdx) : 1;
uint32_t height =
(baseHeight >> mipLevelIdx) ? (baseHeight >> mipLevelIdx) : 1;
if (y < height && filter_radius < width) {
float px = 1.0 / width;
float py = 1.0 / height;
float4 t = make_float4(0.0f);
for (int x = -filter_radius; x <= filter_radius; x++) {
t += tex2DLod<float4>(textureMipMapInput, x * px, y * py,
(float)mipLevelIdx);
}
unsigned int dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], 0, y);
for (int x = 1; x < width; x++) {
t += tex2DLod<float4>(textureMipMapInput, (x + filter_radius) * px,
y * py, (float)mipLevelIdx);
t -=
tex2DLod<float4>(textureMipMapInput, (x - filter_radius - 1) * px,
y * py, (float)mipLevelIdx);
unsigned int dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx],
x * sizeof(uchar4), y);
}
}
}
}
}
// column pass: walks each column per mip level using surface reads and writes
__global__ void d_boxfilter_rgba_y(cudaSurfaceObject_t* dstSurfMipMapArray,
cudaSurfaceObject_t* srcSurfMipMapArray,
size_t baseWidth, size_t baseHeight,
size_t mipLevels, int filter_radius) {
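// One thread per image column: for each mip level, build a running sum down
// the column with the edge texels replicated, reading the row-pass output
// from the source surfaces and writing the averaged result to the destination
// surfaces (the Vulkan-shared image in this sample).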
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
float scale = 1.0f / (float)((filter_radius << 1) + 1);
for (uint32_t mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
uint32_t width =
(baseWidth >> mipLevelIdx) ? (baseWidth >> mipLevelIdx) : 1;
uint32_t height =
(baseHeight >> mipLevelIdx) ? (baseHeight >> mipLevelIdx) : 1;
if (x < width && height > filter_radius) {
float4 t;
// do left edge
int colInBytes = x * sizeof(uchar4);
unsigned int pixFirst = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, 0);
t = rgbaIntToFloat(pixFirst) * filter_radius;
for (int y = 0; (y < (filter_radius + 1)) && (y < height); y++) {
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y);
t += rgbaIntToFloat(pix);
}
unsigned int dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, 0);
for (int y = 1; (y < filter_radius + 1) && ((y + filter_radius) < height);
y++) {
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y + filter_radius);
t += rgbaIntToFloat(pix);
t -= rgbaIntToFloat(pixFirst);
dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
}
// main loop
for (int y = (filter_radius + 1); y < (height - filter_radius); y++) {
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y + filter_radius);
t += rgbaIntToFloat(pix);
pix = surf2Dread<unsigned int>(srcSurfMipMapArray[mipLevelIdx],
colInBytes, y - filter_radius - 1);
t -= rgbaIntToFloat(pix);
dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
}
// do right edge
unsigned int pixLast = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, height - 1);
for (int y = height - filter_radius;
(y < height) && ((y - filter_radius - 1) > 1); y++) {
t += rgbaIntToFloat(pixLast);
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y - filter_radius - 1);
t -= rgbaIntToFloat(pix);
dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
}
}
}
}
class vulkanImageCUDA {
public:
void loadImageData(const std::string& filename) {
// load image (needed so we can get the width and height before we create
// the window)
char* image_path =
sdkFindFilePath(filename.c_str(), execution_path.c_str());
if (image_path == 0) {
printf("Error finding image file '%s'\n", filename.c_str());
exit(EXIT_FAILURE);
}
sdkLoadPPM4(image_path, (unsigned char**)&image_data, &imageWidth,
&imageHeight);
if (!image_data) {
printf("Error opening file '%s'\n", image_path);
exit(EXIT_FAILURE);
}
printf("Loaded '%s', %d x %d pixels\n", image_path, imageWidth,
imageHeight);
}
void run() {
initWindow();
initVulkan();
initCuda();
mainLoop();
cleanup();
}
private:
GLFWwindow* window;
VkInstance instance;
VkDebugUtilsMessengerEXT debugMessenger;
VkSurfaceKHR surface;
VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
VkDevice device;
uint8_t vkDeviceUUID[VK_UUID_SIZE];
VkQueue graphicsQueue;
VkQueue presentQueue;
VkSwapchainKHR swapChain;
std::vector<VkImage> swapChainImages;
VkFormat swapChainImageFormat;
VkExtent2D swapChainExtent;
std::vector<VkImageView> swapChainImageViews;
std::vector<VkFramebuffer> swapChainFramebuffers;
VkRenderPass renderPass;
VkDescriptorSetLayout descriptorSetLayout;
VkPipelineLayout pipelineLayout;
VkPipeline graphicsPipeline;
VkCommandPool commandPool;
VkImage textureImage;
VkDeviceMemory textureImageMemory;
VkImageView textureImageView;
VkSampler textureSampler;
VkBuffer vertexBuffer;
VkDeviceMemory vertexBufferMemory;
VkBuffer indexBuffer;
VkDeviceMemory indexBufferMemory;
std::vector<VkBuffer> uniformBuffers;
std::vector<VkDeviceMemory> uniformBuffersMemory;
VkDescriptorPool descriptorPool;
std::vector<VkDescriptorSet> descriptorSets;
std::vector<VkCommandBuffer> commandBuffers;
std::vector<VkSemaphore> imageAvailableSemaphores;
std::vector<VkSemaphore> renderFinishedSemaphores;
VkSemaphore cudaUpdateVkSemaphore, vkUpdateCudaSemaphore;
std::vector<VkFence> inFlightFences;
size_t currentFrame = 0;
bool framebufferResized = false;
#ifdef _WIN64
PFN_vkGetMemoryWin32HandleKHR fpGetMemoryWin32HandleKHR;
PFN_vkGetSemaphoreWin32HandleKHR fpGetSemaphoreWin32HandleKHR;
#else
PFN_vkGetMemoryFdKHR fpGetMemoryFdKHR = NULL;
PFN_vkGetSemaphoreFdKHR fpGetSemaphoreFdKHR = NULL;
#endif
PFN_vkGetPhysicalDeviceProperties2 fpGetPhysicalDeviceProperties2;
unsigned int* image_data = NULL;
unsigned int imageWidth, imageHeight;
unsigned int mipLevels;
size_t totalImageMemSize;
// CUDA objects
cudaExternalMemory_t cudaExtMemImageBuffer;
cudaMipmappedArray_t cudaMipmappedImageArray, cudaMipmappedImageArrayTemp,
cudaMipmappedImageArrayOrig;
std::vector<cudaSurfaceObject_t> surfaceObjectList, surfaceObjectListTemp;
cudaSurfaceObject_t *d_surfaceObjectList, *d_surfaceObjectListTemp;
cudaTextureObject_t textureObjMipMapInput;
cudaExternalSemaphore_t cudaExtCudaUpdateVkSemaphore;
cudaExternalSemaphore_t cudaExtVkUpdateCudaSemaphore;
cudaStream_t streamToRun;
void initWindow() {
glfwInit();
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan Image CUDA Box Filter",
nullptr, nullptr);
glfwSetWindowUserPointer(window, this);
glfwSetFramebufferSizeCallback(window, framebufferResizeCallback);
}
static void framebufferResizeCallback(GLFWwindow* window, int width,
int height) {
auto app =
reinterpret_cast<vulkanImageCUDA*>(glfwGetWindowUserPointer(window));
app->framebufferResized = true;
}
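// Standard Vulkan bring-up, extended with the external-memory and
// external-semaphore device extensions plus the KHR export entry points that
// the CUDA interop below relies on.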
void initVulkan() {
createInstance();
setupDebugMessenger();
createSurface();
pickPhysicalDevice();
createLogicalDevice();
getKhrExtensionsFn();
createSwapChain();
createImageViews();
createRenderPass();
createDescriptorSetLayout();
createGraphicsPipeline();
createFramebuffers();
createCommandPool();
createTextureImage();
createTextureImageView();
createTextureSampler();
createVertexBuffer();
createIndexBuffer();
createUniformBuffers();
createDescriptorPool();
createDescriptorSets();
createCommandBuffers();
createSyncObjects();
createSyncObjectsExt();
}
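// CUDA-side setup: select the CUDA device whose UUID matches the Vulkan
// physical device, create a stream for the filter kernels, then import the
// Vulkan image memory and the two interop semaphores into CUDA.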
void initCuda() {
setCudaVkDevice();
checkCudaErrors(cudaStreamCreate(&streamToRun));
cudaVkImportImageMem();
cudaVkImportSemaphore();
}
void mainLoop() {
updateUniformBuffer();
while (!glfwWindowShouldClose(window)) {
glfwPollEvents();
drawFrame();
}
vkDeviceWaitIdle(device);
}
void cleanupSwapChain() {
for (auto framebuffer : swapChainFramebuffers) {
vkDestroyFramebuffer(device, framebuffer, nullptr);
}
vkFreeCommandBuffers(device, commandPool,
static_cast<uint32_t>(commandBuffers.size()),
commandBuffers.data());
vkDestroyPipeline(device, graphicsPipeline, nullptr);
vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
vkDestroyRenderPass(device, renderPass, nullptr);
for (auto imageView : swapChainImageViews) {
vkDestroyImageView(device, imageView, nullptr);
}
vkDestroySwapchainKHR(device, swapChain, nullptr);
for (size_t i = 0; i < swapChainImages.size(); i++) {
vkDestroyBuffer(device, uniformBuffers[i], nullptr);
vkFreeMemory(device, uniformBuffersMemory[i], nullptr);
}
vkDestroyDescriptorPool(device, descriptorPool, nullptr);
}
void cleanup() {
cleanupSwapChain();
vkDestroySampler(device, textureSampler, nullptr);
vkDestroyImageView(device, textureImageView, nullptr);
for (int i = 0; i < mipLevels; i++) {
checkCudaErrors(cudaDestroySurfaceObject(surfaceObjectList[i]));
checkCudaErrors(cudaDestroySurfaceObject(surfaceObjectListTemp[i]));
}
checkCudaErrors(cudaFree(d_surfaceObjectList));
checkCudaErrors(cudaFree(d_surfaceObjectListTemp));
checkCudaErrors(cudaFreeMipmappedArray(cudaMipmappedImageArrayTemp));
checkCudaErrors(cudaFreeMipmappedArray(cudaMipmappedImageArrayOrig));
checkCudaErrors(cudaFreeMipmappedArray(cudaMipmappedImageArray));
checkCudaErrors(cudaDestroyTextureObject(textureObjMipMapInput));
checkCudaErrors(cudaDestroyExternalMemory(cudaExtMemImageBuffer));
checkCudaErrors(cudaDestroyExternalSemaphore(cudaExtCudaUpdateVkSemaphore));
checkCudaErrors(cudaDestroyExternalSemaphore(cudaExtVkUpdateCudaSemaphore));
vkDestroyImage(device, textureImage, nullptr);
vkFreeMemory(device, textureImageMemory, nullptr);
vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);
vkDestroyBuffer(device, indexBuffer, nullptr);
vkFreeMemory(device, indexBufferMemory, nullptr);
vkDestroyBuffer(device, vertexBuffer, nullptr);
vkFreeMemory(device, vertexBufferMemory, nullptr);
for (size_t i = 0; i < MAX_FRAMES; i++) {
vkDestroySemaphore(device, renderFinishedSemaphores[i], nullptr);
vkDestroySemaphore(device, imageAvailableSemaphores[i], nullptr);
vkDestroyFence(device, inFlightFences[i], nullptr);
}
vkDestroyCommandPool(device, commandPool, nullptr);
vkDestroyDevice(device, nullptr);
if (enableValidationLayers) {
DestroyDebugUtilsMessengerEXT(instance, debugMessenger, nullptr);
}
vkDestroySurfaceKHR(instance, surface, nullptr);
vkDestroyInstance(instance, nullptr);
glfwDestroyWindow(window);
glfwTerminate();
}
void recreateSwapChain() {
int width = 0, height = 0;
while (width == 0 || height == 0) {
glfwGetFramebufferSize(window, &width, &height);
glfwWaitEvents();
}
vkDeviceWaitIdle(device);
cleanupSwapChain();
createSwapChain();
createImageViews();
createRenderPass();
createGraphicsPipeline();
createFramebuffers();
createUniformBuffers();
createDescriptorPool();
createDescriptorSets();
createCommandBuffers();
}
void createInstance() {
if (enableValidationLayers && !checkValidationLayerSupport()) {
throw std::runtime_error(
"validation layers requested, but not available!");
}
VkApplicationInfo appInfo = {};
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
appInfo.pApplicationName = "Vulkan Image CUDA Interop";
appInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
appInfo.pEngineName = "No Engine";
appInfo.engineVersion = VK_MAKE_VERSION(1, 0, 0);
appInfo.apiVersion = VK_API_VERSION_1_0;
VkInstanceCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
createInfo.pApplicationInfo = &appInfo;
auto extensions = getRequiredExtensions();
createInfo.enabledExtensionCount = static_cast<uint32_t>(extensions.size());
createInfo.ppEnabledExtensionNames = extensions.data();
VkDebugUtilsMessengerCreateInfoEXT debugCreateInfo;
if (enableValidationLayers) {
createInfo.enabledLayerCount =
static_cast<uint32_t>(validationLayers.size());
createInfo.ppEnabledLayerNames = validationLayers.data();
populateDebugMessengerCreateInfo(debugCreateInfo);
createInfo.pNext = (VkDebugUtilsMessengerCreateInfoEXT*)&debugCreateInfo;
} else {
createInfo.enabledLayerCount = 0;
createInfo.pNext = nullptr;
}
if (vkCreateInstance(&createInfo, nullptr, &instance) != VK_SUCCESS) {
throw std::runtime_error("failed to create instance!");
}
fpGetPhysicalDeviceProperties2 =
(PFN_vkGetPhysicalDeviceProperties2)vkGetInstanceProcAddr(
instance, "vkGetPhysicalDeviceProperties2");
if (fpGetPhysicalDeviceProperties2 == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetPhysicalDeviceProperties2KHR\" not "
"found.\n");
}
#ifdef _WIN64
fpGetMemoryWin32HandleKHR =
(PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(
instance, "vkGetMemoryWin32HandleKHR");
if (fpGetMemoryWin32HandleKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetMemoryWin32HandleKHR\" not "
"found.\n");
}
#else
fpGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(
instance, "vkGetMemoryFdKHR");
if (fpGetMemoryFdKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetMemoryFdKHR\" not found.\n");
} else {
std::cout << "Vulkan proc address for vkGetMemoryFdKHR - "
<< fpGetMemoryFdKHR << std::endl;
}
#endif
}
void populateDebugMessengerCreateInfo(
VkDebugUtilsMessengerCreateInfoEXT& createInfo) {
createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
createInfo.messageSeverity =
VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
createInfo.pfnUserCallback = debugCallback;
}
void setupDebugMessenger() {
if (!enableValidationLayers) return;
VkDebugUtilsMessengerCreateInfoEXT createInfo;
populateDebugMessengerCreateInfo(createInfo);
if (CreateDebugUtilsMessengerEXT(instance, &createInfo, nullptr,
&debugMessenger) != VK_SUCCESS) {
throw std::runtime_error("failed to set up debug messenger!");
}
}
void createSurface() {
if (glfwCreateWindowSurface(instance, window, nullptr, &surface) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create window surface!");
}
}
void pickPhysicalDevice() {
uint32_t deviceCount = 0;
vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
if (deviceCount == 0) {
throw std::runtime_error("failed to find GPUs with Vulkan support!");
}
std::vector<VkPhysicalDevice> devices(deviceCount);
vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());
for (const auto& device : devices) {
if (isDeviceSuitable(device)) {
physicalDevice = device;
break;
}
}
if (physicalDevice == VK_NULL_HANDLE) {
throw std::runtime_error("failed to find a suitable GPU!");
}
std::cout << "Selected physical device = " << physicalDevice << std::endl;
VkPhysicalDeviceIDProperties vkPhysicalDeviceIDProperties = {};
vkPhysicalDeviceIDProperties.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES;
vkPhysicalDeviceIDProperties.pNext = NULL;
VkPhysicalDeviceProperties2 vkPhysicalDeviceProperties2 = {};
vkPhysicalDeviceProperties2.sType =
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
vkPhysicalDeviceProperties2.pNext = &vkPhysicalDeviceIDProperties;
fpGetPhysicalDeviceProperties2(physicalDevice,
&vkPhysicalDeviceProperties2);
memcpy(vkDeviceUUID, vkPhysicalDeviceIDProperties.deviceUUID,
sizeof(vkDeviceUUID));
}
void getKhrExtensionsFn() {
#ifdef _WIN64
fpGetSemaphoreWin32HandleKHR =
(PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(
device, "vkGetSemaphoreWin32HandleKHR");
if (fpGetSemaphoreWin32HandleKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetSemaphoreWin32HandleKHR\" not "
"found.\n");
}
#else
fpGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(
device, "vkGetSemaphoreFdKHR");
if (fpGetSemaphoreFdKHR == NULL) {
throw std::runtime_error(
"Vulkan: Proc address for \"vkGetSemaphoreFdKHR\" not found.\n");
}
#endif
}
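// Iterate over the CUDA devices and pick the one whose UUID matches
// vkDeviceUUID, so CUDA and Vulkan operate on the same physical GPU.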
int setCudaVkDevice() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceCount(&device_count));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the GPU which is selected by Vulkan
while (current_device < device_count) {
cudaGetDeviceProperties(&deviceProp, current_device);
if ((deviceProp.computeMode != cudaComputeModeProhibited)) {
// Compare the cuda device UUID with vulkan UUID
int ret = memcmp(&deviceProp.uuid, &vkDeviceUUID, VK_UUID_SIZE);
if (ret == 0) {
checkCudaErrors(cudaSetDevice(current_device));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, current_device));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, deviceProp.name, deviceProp.major,
deviceProp.minor);
return current_device;
}
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No Vulkan-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
void createLogicalDevice() {
QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
std::set<int> uniqueQueueFamilies = {indices.graphicsFamily,
indices.presentFamily};
float queuePriority = 1.0f;
for (int queueFamily : uniqueQueueFamilies) {
VkDeviceQueueCreateInfo queueCreateInfo = {};
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.queueFamilyIndex = queueFamily;
queueCreateInfo.queueCount = 1;
queueCreateInfo.pQueuePriorities = &queuePriority;
queueCreateInfos.push_back(queueCreateInfo);
}
VkPhysicalDeviceFeatures deviceFeatures = {};
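// Note: no device features are enabled here, yet the texture sampler later
// sets anisotropyEnable = VK_TRUE, which strictly requires the
// samplerAnisotropy feature; validation layers may flag this.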
VkDeviceCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
createInfo.pQueueCreateInfos = queueCreateInfos.data();
createInfo.queueCreateInfoCount = queueCreateInfos.size();
createInfo.pEnabledFeatures = &deviceFeatures;
std::vector<const char*> enabledExtensionNameList;
for (int i = 0; i < deviceExtensions.size(); i++) {
enabledExtensionNameList.push_back(deviceExtensions[i]);
}
if (enableValidationLayers) {
createInfo.enabledLayerCount =
static_cast<uint32_t>(validationLayers.size());
createInfo.ppEnabledLayerNames = validationLayers.data();
} else {
createInfo.enabledLayerCount = 0;
}
createInfo.enabledExtensionCount =
static_cast<uint32_t>(enabledExtensionNameList.size());
createInfo.ppEnabledExtensionNames = enabledExtensionNameList.data();
if (vkCreateDevice(physicalDevice, &createInfo, nullptr, &device) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create logical device!");
}
vkGetDeviceQueue(device, indices.graphicsFamily, 0, &graphicsQueue);
vkGetDeviceQueue(device, indices.presentFamily, 0, &presentQueue);
}
void createSwapChain() {
SwapChainSupportDetails swapChainSupport =
querySwapChainSupport(physicalDevice);
VkSurfaceFormatKHR surfaceFormat =
chooseSwapSurfaceFormat(swapChainSupport.formats);
VkPresentModeKHR presentMode =
chooseSwapPresentMode(swapChainSupport.presentModes);
VkExtent2D extent = chooseSwapExtent(swapChainSupport.capabilities);
uint32_t imageCount = swapChainSupport.capabilities.minImageCount + 1;
if (swapChainSupport.capabilities.maxImageCount > 0 &&
imageCount > swapChainSupport.capabilities.maxImageCount) {
imageCount = swapChainSupport.capabilities.maxImageCount;
}
VkSwapchainCreateInfoKHR createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
createInfo.surface = surface;
createInfo.minImageCount = imageCount;
createInfo.imageFormat = surfaceFormat.format;
createInfo.imageColorSpace = surfaceFormat.colorSpace;
createInfo.imageExtent = extent;
createInfo.imageArrayLayers = 1;
createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
uint32_t queueFamilyIndices[] = {(uint32_t)indices.graphicsFamily,
(uint32_t)indices.presentFamily};
if (indices.graphicsFamily != indices.presentFamily) {
createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
createInfo.queueFamilyIndexCount = 2;
createInfo.pQueueFamilyIndices = queueFamilyIndices;
} else {
createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
}
createInfo.preTransform = swapChainSupport.capabilities.currentTransform;
createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
createInfo.presentMode = presentMode;
createInfo.clipped = VK_TRUE;
if (vkCreateSwapchainKHR(device, &createInfo, nullptr, &swapChain) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create swap chain!");
}
vkGetSwapchainImagesKHR(device, swapChain, &imageCount, nullptr);
swapChainImages.resize(imageCount);
vkGetSwapchainImagesKHR(device, swapChain, &imageCount,
swapChainImages.data());
swapChainImageFormat = surfaceFormat.format;
swapChainExtent = extent;
}
void createImageViews() {
swapChainImageViews.resize(swapChainImages.size());
for (size_t i = 0; i < swapChainImages.size(); i++) {
swapChainImageViews[i] =
createImageView(swapChainImages[i], swapChainImageFormat);
}
}
void createRenderPass() {
VkAttachmentDescription colorAttachment = {};
colorAttachment.format = swapChainImageFormat;
colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VkAttachmentReference colorAttachmentRef = {};
colorAttachmentRef.attachment = 0;
colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorAttachmentRef;
VkSubpassDependency dependency = {};
dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
dependency.dstSubpass = 0;
dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.srcAccessMask = 0;
dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
VkRenderPassCreateInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
renderPassInfo.attachmentCount = 1;
renderPassInfo.pAttachments = &colorAttachment;
renderPassInfo.subpassCount = 1;
renderPassInfo.pSubpasses = &subpass;
renderPassInfo.dependencyCount = 1;
renderPassInfo.pDependencies = &dependency;
if (vkCreateRenderPass(device, &renderPassInfo, nullptr, &renderPass) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create render pass!");
}
}
void createDescriptorSetLayout() {
VkDescriptorSetLayoutBinding uboLayoutBinding = {};
uboLayoutBinding.binding = 0;
uboLayoutBinding.descriptorCount = 1;
uboLayoutBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
uboLayoutBinding.pImmutableSamplers = nullptr;
uboLayoutBinding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
VkDescriptorSetLayoutBinding samplerLayoutBinding = {};
samplerLayoutBinding.binding = 1;
samplerLayoutBinding.descriptorCount = 1;
samplerLayoutBinding.descriptorType =
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
samplerLayoutBinding.pImmutableSamplers = nullptr;
samplerLayoutBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
std::array<VkDescriptorSetLayoutBinding, 2> bindings = {
uboLayoutBinding, samplerLayoutBinding};
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layoutInfo.bindingCount = static_cast<uint32_t>(bindings.size());
layoutInfo.pBindings = bindings.data();
if (vkCreateDescriptorSetLayout(device, &layoutInfo, nullptr,
&descriptorSetLayout) != VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor set layout!");
}
}
void createGraphicsPipeline() {
auto vertShaderCode = readFile("shader.vert");
auto fragShaderCode = readFile("shader.frag");
VkShaderModule vertShaderModule = createShaderModule(vertShaderCode);
VkShaderModule fragShaderModule = createShaderModule(fragShaderCode);
VkPipelineShaderStageCreateInfo vertShaderStageInfo = {};
vertShaderStageInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertShaderStageInfo.module = vertShaderModule;
vertShaderStageInfo.pName = "main";
VkPipelineShaderStageCreateInfo fragShaderStageInfo = {};
fragShaderStageInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragShaderStageInfo.module = fragShaderModule;
fragShaderStageInfo.pName = "main";
VkPipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo,
fragShaderStageInfo};
VkPipelineVertexInputStateCreateInfo vertexInputInfo = {};
vertexInputInfo.sType =
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
auto bindingDescription = Vertex::getBindingDescription();
auto attributeDescriptions = Vertex::getAttributeDescriptions();
vertexInputInfo.vertexBindingDescriptionCount = 1;
vertexInputInfo.vertexAttributeDescriptionCount =
static_cast<uint32_t>(attributeDescriptions.size());
vertexInputInfo.pVertexBindingDescriptions = &bindingDescription;
vertexInputInfo.pVertexAttributeDescriptions = attributeDescriptions.data();
VkPipelineInputAssemblyStateCreateInfo inputAssembly = {};
inputAssembly.sType =
VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
inputAssembly.primitiveRestartEnable = VK_FALSE;
VkViewport viewport = {};
viewport.x = 0.0f;
viewport.y = 0.0f;
viewport.width = (float)swapChainExtent.width;
viewport.height = (float)swapChainExtent.height;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
VkRect2D scissor = {};
scissor.offset = {0, 0};
scissor.extent = swapChainExtent;
VkPipelineViewportStateCreateInfo viewportState = {};
viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewportState.viewportCount = 1;
viewportState.pViewports = &viewport;
viewportState.scissorCount = 1;
viewportState.pScissors = &scissor;
VkPipelineRasterizationStateCreateInfo rasterizer = {};
rasterizer.sType =
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterizer.depthClampEnable = VK_FALSE;
rasterizer.rasterizerDiscardEnable = VK_FALSE;
rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
rasterizer.lineWidth = 1.0f;
rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterizer.depthBiasEnable = VK_FALSE;
VkPipelineMultisampleStateCreateInfo multisampling = {};
multisampling.sType =
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisampling.sampleShadingEnable = VK_FALSE;
multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
VkPipelineColorBlendAttachmentState colorBlendAttachment = {};
colorBlendAttachment.colorWriteMask =
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
colorBlendAttachment.blendEnable = VK_FALSE;
VkPipelineColorBlendStateCreateInfo colorBlending = {};
colorBlending.sType =
VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
colorBlending.logicOpEnable = VK_FALSE;
colorBlending.logicOp = VK_LOGIC_OP_COPY;
colorBlending.attachmentCount = 1;
colorBlending.pAttachments = &colorBlendAttachment;
colorBlending.blendConstants[0] = 0.0f;
colorBlending.blendConstants[1] = 0.0f;
colorBlending.blendConstants[2] = 0.0f;
colorBlending.blendConstants[3] = 0.0f;
VkPipelineLayoutCreateInfo pipelineLayoutInfo = {};
pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipelineLayoutInfo.setLayoutCount = 1;
pipelineLayoutInfo.pSetLayouts = &descriptorSetLayout;
if (vkCreatePipelineLayout(device, &pipelineLayoutInfo, nullptr,
&pipelineLayout) != VK_SUCCESS) {
throw std::runtime_error("failed to create pipeline layout!");
}
VkGraphicsPipelineCreateInfo pipelineInfo = {};
pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipelineInfo.stageCount = 2;
pipelineInfo.pStages = shaderStages;
pipelineInfo.pVertexInputState = &vertexInputInfo;
pipelineInfo.pInputAssemblyState = &inputAssembly;
pipelineInfo.pViewportState = &viewportState;
pipelineInfo.pRasterizationState = &rasterizer;
pipelineInfo.pMultisampleState = &multisampling;
pipelineInfo.pColorBlendState = &colorBlending;
pipelineInfo.layout = pipelineLayout;
pipelineInfo.renderPass = renderPass;
pipelineInfo.subpass = 0;
pipelineInfo.basePipelineHandle = VK_NULL_HANDLE;
if (vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipelineInfo,
nullptr, &graphicsPipeline) != VK_SUCCESS) {
throw std::runtime_error("failed to create graphics pipeline!");
}
vkDestroyShaderModule(device, fragShaderModule, nullptr);
vkDestroyShaderModule(device, vertShaderModule, nullptr);
}
void createFramebuffers() {
swapChainFramebuffers.resize(swapChainImageViews.size());
for (size_t i = 0; i < swapChainImageViews.size(); i++) {
VkImageView attachments[] = {swapChainImageViews[i]};
VkFramebufferCreateInfo framebufferInfo = {};
framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferInfo.renderPass = renderPass;
framebufferInfo.attachmentCount = 1;
framebufferInfo.pAttachments = attachments;
framebufferInfo.width = swapChainExtent.width;
framebufferInfo.height = swapChainExtent.height;
framebufferInfo.layers = 1;
if (vkCreateFramebuffer(device, &framebufferInfo, nullptr,
&swapChainFramebuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to create framebuffer!");
}
}
}
void createCommandPool() {
QueueFamilyIndices queueFamilyIndices = findQueueFamilies(physicalDevice);
VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.queueFamilyIndex = queueFamilyIndices.graphicsFamily;
if (vkCreateCommandPool(device, &poolInfo, nullptr, &commandPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create graphics command pool!");
}
}
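// Upload the PPM pixels through a host-visible staging buffer into a
// device-local VkImage created with storage/transfer/sampled usage and an
// exportable allocation (see createImage), so the CUDA kernels can write it
// via surfaces while Vulkan samples it.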
void createTextureImage() {
VkDeviceSize imageSize = imageWidth * imageHeight * 4;
mipLevels = static_cast<uint32_t>(
std::floor(std::log2(std::max(imageWidth, imageHeight)))) +
1;
printf("mipLevels = %d\n", mipLevels);
if (!image_data) {
throw std::runtime_error("failed to load texture image!");
}
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(imageSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, imageSize, 0, &data);
memcpy(data, image_data, static_cast<size_t>(imageSize));
vkUnmapMemory(device, stagingBufferMemory);
// VK_FORMAT_R8G8B8A8_UNORM changed to VK_FORMAT_R8G8B8A8_UINT
createImage(
imageWidth, imageHeight, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, textureImage, textureImageMemory);
transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
copyBufferToImage(stagingBuffer, textureImage,
static_cast<uint32_t>(imageWidth),
static_cast<uint32_t>(imageHeight));
transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
generateMipmaps(textureImage, VK_FORMAT_R8G8B8A8_UNORM);
}
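// Build the mip chain on the GPU: transition level i-1 to TRANSFER_SRC, blit
// it half-sized with linear filtering into level i, then move the source
// level to SHADER_READ_ONLY for sampling.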
void generateMipmaps(VkImage image, VkFormat imageFormat) {
VkFormatProperties formatProperties;
vkGetPhysicalDeviceFormatProperties(physicalDevice, imageFormat,
&formatProperties);
if (!(formatProperties.optimalTilingFeatures &
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) {
throw std::runtime_error(
"texture image format does not support linear blitting!");
}
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.image = image;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
barrier.subresourceRange.levelCount = 1;
int32_t mipWidth = imageWidth;
int32_t mipHeight = imageHeight;
for (uint32_t i = 1; i < mipLevels; i++) {
barrier.subresourceRange.baseMipLevel = i - 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &barrier);
VkImageBlit blit = {};
blit.srcOffsets[0] = {0, 0, 0};
blit.srcOffsets[1] = {mipWidth, mipHeight, 1};
blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.srcSubresource.mipLevel = i - 1;
blit.srcSubresource.baseArrayLayer = 0;
blit.srcSubresource.layerCount = 1;
blit.dstOffsets[0] = {0, 0, 0};
blit.dstOffsets[1] = {mipWidth > 1 ? mipWidth / 2 : 1,
mipHeight > 1 ? mipHeight / 2 : 1, 1};
blit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit.dstSubresource.mipLevel = i;
blit.dstSubresource.baseArrayLayer = 0;
blit.dstSubresource.layerCount = 1;
vkCmdBlitImage(commandBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit,
VK_FILTER_LINEAR);
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
0, nullptr, 1, &barrier);
if (mipWidth > 1) mipWidth /= 2;
if (mipHeight > 1) mipHeight /= 2;
}
barrier.subresourceRange.baseMipLevel = mipLevels - 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
0, nullptr, 1, &barrier);
endSingleTimeCommands(commandBuffer);
}
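// Export the Vulkan texture memory and semaphores as OS-level handles
// (Win32 HANDLEs on Windows, opaque file descriptors elsewhere) for import
// on the CUDA side.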
#ifdef _WIN64 // For windows
HANDLE getVkImageMemHandle(
VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
HANDLE handle;
VkMemoryGetWin32HandleInfoKHR vkMemoryGetWin32HandleInfoKHR = {};
vkMemoryGetWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR;
vkMemoryGetWin32HandleInfoKHR.pNext = NULL;
vkMemoryGetWin32HandleInfoKHR.memory = textureImageMemory;
vkMemoryGetWin32HandleInfoKHR.handleType =
(VkExternalMemoryHandleTypeFlagBitsKHR)externalMemoryHandleType;
fpGetMemoryWin32HandleKHR(device, &vkMemoryGetWin32HandleInfoKHR, &handle);
return handle;
}
HANDLE getVkSemaphoreHandle(
VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
VkSemaphore& semVkCuda) {
HANDLE handle;
VkSemaphoreGetWin32HandleInfoKHR vulkanSemaphoreGetWin32HandleInfoKHR = {};
vulkanSemaphoreGetWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR;
vulkanSemaphoreGetWin32HandleInfoKHR.pNext = NULL;
vulkanSemaphoreGetWin32HandleInfoKHR.semaphore = semVkCuda;
vulkanSemaphoreGetWin32HandleInfoKHR.handleType =
externalSemaphoreHandleType;
fpGetSemaphoreWin32HandleKHR(device, &vulkanSemaphoreGetWin32HandleInfoKHR,
&handle);
return handle;
}
#else
int getVkImageMemHandle(
VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
if (externalMemoryHandleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
int fd;
VkMemoryGetFdInfoKHR vkMemoryGetFdInfoKHR = {};
vkMemoryGetFdInfoKHR.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
vkMemoryGetFdInfoKHR.pNext = NULL;
vkMemoryGetFdInfoKHR.memory = textureImageMemory;
vkMemoryGetFdInfoKHR.handleType =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
fpGetMemoryFdKHR(device, &vkMemoryGetFdInfoKHR, &fd);
return fd;
}
return -1;
}
int getVkSemaphoreHandle(
VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
VkSemaphore& semVkCuda) {
if (externalSemaphoreHandleType ==
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
int fd;
VkSemaphoreGetFdInfoKHR vulkanSemaphoreGetFdInfoKHR = {};
vulkanSemaphoreGetFdInfoKHR.sType =
VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
vulkanSemaphoreGetFdInfoKHR.pNext = NULL;
vulkanSemaphoreGetFdInfoKHR.semaphore = semVkCuda;
vulkanSemaphoreGetFdInfoKHR.handleType =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
fpGetSemaphoreFdKHR(device, &vulkanSemaphoreGetFdInfoKHR, &fd);
return fd;
}
return -1;
}
#endif
void createTextureImageView() {
textureImageView = createImageView(textureImage, VK_FORMAT_R8G8B8A8_UNORM);
}
void createTextureSampler() {
VkSamplerCreateInfo samplerInfo = {};
samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
samplerInfo.magFilter = VK_FILTER_LINEAR;
samplerInfo.minFilter = VK_FILTER_LINEAR;
samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.anisotropyEnable = VK_TRUE;
samplerInfo.maxAnisotropy = 16;
samplerInfo.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
samplerInfo.unnormalizedCoordinates = VK_FALSE;
samplerInfo.compareEnable = VK_FALSE;
samplerInfo.compareOp = VK_COMPARE_OP_ALWAYS;
samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
samplerInfo.minLod = 0; // Optional
samplerInfo.maxLod = static_cast<float>(mipLevels);
samplerInfo.mipLodBias = 0; // Optional
if (vkCreateSampler(device, &samplerInfo, nullptr, &textureSampler) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create texture sampler!");
}
}
VkImageView createImageView(VkImage image, VkFormat format) {
VkImageViewCreateInfo viewInfo = {};
viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewInfo.image = image;
viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
viewInfo.format = format;
viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewInfo.subresourceRange.baseMipLevel = 0;
viewInfo.subresourceRange.levelCount = mipLevels;
viewInfo.subresourceRange.baseArrayLayer = 0;
viewInfo.subresourceRange.layerCount = 1;
VkImageView imageView;
if (vkCreateImageView(device, &viewInfo, nullptr, &imageView) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create texture image view!");
}
return imageView;
}
void createImage(uint32_t width, uint32_t height, VkFormat format,
VkImageTiling tiling, VkImageUsageFlags usage,
VkMemoryPropertyFlags properties, VkImage& image,
VkDeviceMemory& imageMemory) {
VkImageCreateInfo imageInfo = {};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.extent.width = width;
imageInfo.extent.height = height;
imageInfo.extent.depth = 1;
imageInfo.mipLevels = mipLevels;
imageInfo.arrayLayers = 1;
imageInfo.format = format;
imageInfo.tiling = tiling;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageInfo.usage = usage;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
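// Chain external-memory create/export structures so the image's device
// memory is allocated exportable and can later be imported by CUDA.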
VkExternalMemoryImageCreateInfo vkExternalMemImageCreateInfo = {};
vkExternalMemImageCreateInfo.sType =
VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
vkExternalMemImageCreateInfo.pNext = NULL;
vkExternalMemImageCreateInfo.handleTypes =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
imageInfo.pNext = &vkExternalMemImageCreateInfo;
if (vkCreateImage(device, &imageInfo, nullptr, &image) != VK_SUCCESS) {
throw std::runtime_error("failed to create image!");
}
VkMemoryRequirements memRequirements;
vkGetImageMemoryRequirements(device, image, &memRequirements);
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportMemoryWin32HandleInfoKHR vulkanExportMemoryWin32HandleInfoKHR = {};
vulkanExportMemoryWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR;
vulkanExportMemoryWin32HandleInfoKHR.pNext = NULL;
vulkanExportMemoryWin32HandleInfoKHR.pAttributes = &winSecurityAttributes;
vulkanExportMemoryWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportMemoryWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportMemoryAllocateInfoKHR vulkanExportMemoryAllocateInfoKHR = {};
vulkanExportMemoryAllocateInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
#ifdef _WIN64
vulkanExportMemoryAllocateInfoKHR.pNext =
IsWindows8OrGreater() ? &vulkanExportMemoryWin32HandleInfoKHR : NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes =
IsWindows8OrGreater()
? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
vulkanExportMemoryAllocateInfoKHR.pNext = NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.pNext = &vulkanExportMemoryAllocateInfoKHR;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
VkMemoryRequirements vkMemoryRequirements = {};
vkGetImageMemoryRequirements(device, image, &vkMemoryRequirements);
totalImageMemSize = vkMemoryRequirements.size;
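// Note: the allocation and bind below use the textureImageMemory member
// instead of the imageMemory parameter. The only caller passes
// textureImageMemory for that parameter, so behavior is unchanged, but the
// helper is not reusable for other images as written.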
if (vkAllocateMemory(device, &allocInfo, nullptr, &textureImageMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate image memory!");
}
vkBindImageMemory(device, image, textureImageMemory, 0);
}
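// Import the two Vulkan semaphores into CUDA as external semaphores so the
// CUDA stream can wait on vkUpdateCudaSemaphore and signal
// cudaUpdateVkSemaphore each frame.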
void cudaVkImportSemaphore() {
cudaExternalSemaphoreHandleDesc externalSemaphoreHandleDesc;
memset(&externalSemaphoreHandleDesc, 0,
sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
externalSemaphoreHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
: cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
cudaUpdateVkSemaphore);
#else
externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
externalSemaphoreHandleDesc.handle.fd = getVkSemaphoreHandle(
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, cudaUpdateVkSemaphore);
#endif
externalSemaphoreHandleDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(&cudaExtCudaUpdateVkSemaphore,
&externalSemaphoreHandleDesc));
memset(&externalSemaphoreHandleDesc, 0,
sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
externalSemaphoreHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
: cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
vkUpdateCudaSemaphore);
#else
externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
externalSemaphoreHandleDesc.handle.fd = getVkSemaphoreHandle(
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, vkUpdateCudaSemaphore);
#endif
externalSemaphoreHandleDesc.flags = 0;
checkCudaErrors(cudaImportExternalSemaphore(&cudaExtVkUpdateCudaSemaphore,
&externalSemaphoreHandleDesc));
printf("CUDA Imported Vulkan semaphore\n");
}
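// Import the exported Vulkan image memory as a CUDA mipmapped array, create
// one surface object per mip level for writing, keep an untouched copy of
// the original mips behind a texture object for reading, and copy both
// surface-object lists to device memory for the kernels.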
void cudaVkImportImageMem() {
cudaExternalMemoryHandleDesc cudaExtMemHandleDesc;
memset(&cudaExtMemHandleDesc, 0, sizeof(cudaExtMemHandleDesc));
#ifdef _WIN64
cudaExtMemHandleDesc.type =
IsWindows8OrGreater() ? cudaExternalMemoryHandleTypeOpaqueWin32
: cudaExternalMemoryHandleTypeOpaqueWin32Kmt;
cudaExtMemHandleDesc.handle.win32.handle = getVkImageMemHandle(
IsWindows8OrGreater()
? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT);
#else
cudaExtMemHandleDesc.type = cudaExternalMemoryHandleTypeOpaqueFd;
cudaExtMemHandleDesc.handle.fd =
getVkImageMemHandle(VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
#endif
cudaExtMemHandleDesc.size = totalImageMemSize;
checkCudaErrors(cudaImportExternalMemory(&cudaExtMemImageBuffer,
&cudaExtMemHandleDesc));
cudaExternalMemoryMipmappedArrayDesc externalMemoryMipmappedArrayDesc;
memset(&externalMemoryMipmappedArrayDesc, 0,
sizeof(externalMemoryMipmappedArrayDesc));
cudaExtent extent = make_cudaExtent(imageWidth, imageHeight, 0);
cudaChannelFormatDesc formatDesc;
formatDesc.x = 8;
formatDesc.y = 8;
formatDesc.z = 8;
formatDesc.w = 8;
formatDesc.f = cudaChannelFormatKindUnsigned;
externalMemoryMipmappedArrayDesc.offset = 0;
externalMemoryMipmappedArrayDesc.formatDesc = formatDesc;
externalMemoryMipmappedArrayDesc.extent = extent;
externalMemoryMipmappedArrayDesc.flags = 0;
externalMemoryMipmappedArrayDesc.numLevels = mipLevels;
checkCudaErrors(cudaExternalMemoryGetMappedMipmappedArray(
&cudaMipmappedImageArray, cudaExtMemImageBuffer,
&externalMemoryMipmappedArrayDesc));
checkCudaErrors(cudaMallocMipmappedArray(&cudaMipmappedImageArrayTemp,
&formatDesc, extent, mipLevels));
checkCudaErrors(cudaMallocMipmappedArray(&cudaMipmappedImageArrayOrig,
&formatDesc, extent, mipLevels));
for (int mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
cudaArray_t cudaMipLevelArray, cudaMipLevelArrayTemp,
cudaMipLevelArrayOrig;
cudaResourceDesc resourceDesc;
checkCudaErrors(cudaGetMipmappedArrayLevel(
&cudaMipLevelArray, cudaMipmappedImageArray, mipLevelIdx));
checkCudaErrors(cudaGetMipmappedArrayLevel(
&cudaMipLevelArrayTemp, cudaMipmappedImageArrayTemp, mipLevelIdx));
checkCudaErrors(cudaGetMipmappedArrayLevel(
&cudaMipLevelArrayOrig, cudaMipmappedImageArrayOrig, mipLevelIdx));
uint32_t width =
(imageWidth >> mipLevelIdx) ? (imageWidth >> mipLevelIdx) : 1;
uint32_t height =
(imageHeight >> mipLevelIdx) ? (imageHeight >> mipLevelIdx) : 1;
checkCudaErrors(cudaMemcpy2DArrayToArray(
cudaMipLevelArrayOrig, 0, 0, cudaMipLevelArray, 0, 0,
width * sizeof(uchar4), height, cudaMemcpyDeviceToDevice));
memset(&resourceDesc, 0, sizeof(resourceDesc));
resourceDesc.resType = cudaResourceTypeArray;
resourceDesc.res.array.array = cudaMipLevelArray;
cudaSurfaceObject_t surfaceObject;
checkCudaErrors(cudaCreateSurfaceObject(&surfaceObject, &resourceDesc));
surfaceObjectList.push_back(surfaceObject);
memset(&resourceDesc, 0, sizeof(resourceDesc));
resourceDesc.resType = cudaResourceTypeArray;
resourceDesc.res.array.array = cudaMipLevelArrayTemp;
cudaSurfaceObject_t surfaceObjectTemp;
checkCudaErrors(
cudaCreateSurfaceObject(&surfaceObjectTemp, &resourceDesc));
surfaceObjectListTemp.push_back(surfaceObjectTemp);
}
cudaResourceDesc resDescr;
memset(&resDescr, 0, sizeof(cudaResourceDesc));
resDescr.resType = cudaResourceTypeMipmappedArray;
resDescr.res.mipmap.mipmap = cudaMipmappedImageArrayOrig;
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = true;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.mipmapFilterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeWrap;
texDescr.addressMode[1] = cudaAddressModeWrap;
texDescr.maxMipmapLevelClamp = float(mipLevels - 1);
texDescr.readMode = cudaReadModeNormalizedFloat;
checkCudaErrors(cudaCreateTextureObject(&textureObjMipMapInput, &resDescr,
&texDescr, NULL));
checkCudaErrors(cudaMalloc((void**)&d_surfaceObjectList,
sizeof(cudaSurfaceObject_t) * mipLevels));
checkCudaErrors(cudaMalloc((void**)&d_surfaceObjectListTemp,
sizeof(cudaSurfaceObject_t) * mipLevels));
checkCudaErrors(cudaMemcpy(d_surfaceObjectList, surfaceObjectList.data(),
sizeof(cudaSurfaceObject_t) * mipLevels,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(
d_surfaceObjectListTemp, surfaceObjectListTemp.data(),
sizeof(cudaSurfaceObject_t) * mipLevels, cudaMemcpyHostToDevice));
printf("CUDA Kernel Vulkan image buffer\n");
}
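// Per-frame CUDA work: wait for Vulkan to hand over the image, run the
// separable box filter (row pass into the temp surfaces, column pass back
// into the shared image) over every mip level, animate the radius, then
// signal Vulkan that the image is ready for sampling again.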
void cudaUpdateVkImage() {
cudaVkSemaphoreWait(cudaExtVkUpdateCudaSemaphore);
int nthreads = 128;
/*Perform 2D box filter on image using CUDA */
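// Launch note: the grid size rounds down, so this assumes imageWidth and
// imageHeight are multiples of nthreads (true for the default 512x512
// lenaRGB.ppm); other sizes would leave trailing rows/columns unfiltered.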
d_boxfilter_rgba_x<<<imageHeight / nthreads, nthreads, 0, streamToRun>>>(
d_surfaceObjectListTemp, textureObjMipMapInput, imageWidth, imageHeight,
mipLevels, filter_radius);
d_boxfilter_rgba_y<<<imageWidth / nthreads, nthreads, 0, streamToRun>>>(
d_surfaceObjectList, d_surfaceObjectListTemp, imageWidth, imageHeight,
mipLevels, filter_radius);
varySigma();
cudaVkSemaphoreSignal(cudaExtCudaUpdateVkSemaphore);
}
void transitionImageLayout(VkImage image, VkFormat format,
VkImageLayout oldLayout, VkImageLayout newLayout) {
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = oldLayout;
barrier.newLayout = newLayout;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = mipLevels;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = 1;
VkPipelineStageFlags sourceStage;
VkPipelineStageFlags destinationStage;
if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED &&
newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
} else if (oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
destinationStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
} else {
throw std::invalid_argument("unsupported layout transition!");
}
vkCmdPipelineBarrier(commandBuffer, sourceStage, destinationStage, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
endSingleTimeCommands(commandBuffer);
}
void copyBufferToImage(VkBuffer buffer, VkImage image, uint32_t width,
uint32_t height) {
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkBufferImageCopy region = {};
region.bufferOffset = 0;
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.mipLevel = 0;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageOffset = {0, 0, 0};
region.imageExtent = {width, height, 1};
vkCmdCopyBufferToImage(commandBuffer, buffer, image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
endSingleTimeCommands(commandBuffer);
}
void createVertexBuffer() {
VkDeviceSize bufferSize = sizeof(vertices[0]) * vertices.size();
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, bufferSize, 0, &data);
memcpy(data, vertices.data(), (size_t)bufferSize);
vkUnmapMemory(device, stagingBufferMemory);
createBuffer(
bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, vertexBuffer, vertexBufferMemory);
copyBuffer(stagingBuffer, vertexBuffer, bufferSize);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
}
void createIndexBuffer() {
VkDeviceSize bufferSize = sizeof(indices[0]) * indices.size();
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, bufferSize, 0, &data);
memcpy(data, indices.data(), (size_t)bufferSize);
vkUnmapMemory(device, stagingBufferMemory);
createBuffer(
bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, indexBuffer, indexBufferMemory);
copyBuffer(stagingBuffer, indexBuffer, bufferSize);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
}
void createUniformBuffers() {
VkDeviceSize bufferSize = sizeof(UniformBufferObject);
uniformBuffers.resize(swapChainImages.size());
uniformBuffersMemory.resize(swapChainImages.size());
for (size_t i = 0; i < swapChainImages.size(); i++) {
createBuffer(bufferSize, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
uniformBuffers[i], uniformBuffersMemory[i]);
}
}
void createDescriptorPool() {
std::array<VkDescriptorPoolSize, 2> poolSizes = {};
poolSizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
poolSizes[0].descriptorCount =
static_cast<uint32_t>(swapChainImages.size());
poolSizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
poolSizes[1].descriptorCount =
static_cast<uint32_t>(swapChainImages.size());
VkDescriptorPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
poolInfo.poolSizeCount = static_cast<uint32_t>(poolSizes.size());
poolInfo.pPoolSizes = poolSizes.data();
poolInfo.maxSets = static_cast<uint32_t>(swapChainImages.size());
if (vkCreateDescriptorPool(device, &poolInfo, nullptr, &descriptorPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor pool!");
}
}
void createDescriptorSets() {
std::vector<VkDescriptorSetLayout> layouts(swapChainImages.size(),
descriptorSetLayout);
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = descriptorPool;
allocInfo.descriptorSetCount =
static_cast<uint32_t>(swapChainImages.size());
allocInfo.pSetLayouts = layouts.data();
descriptorSets.resize(swapChainImages.size());
if (vkAllocateDescriptorSets(device, &allocInfo, descriptorSets.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate descriptor sets!");
}
for (size_t i = 0; i < swapChainImages.size(); i++) {
VkDescriptorBufferInfo bufferInfo = {};
bufferInfo.buffer = uniformBuffers[i];
bufferInfo.offset = 0;
bufferInfo.range = sizeof(UniformBufferObject);
VkDescriptorImageInfo imageInfo = {};
imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imageInfo.imageView = textureImageView;
imageInfo.sampler = textureSampler;
std::array<VkWriteDescriptorSet, 2> descriptorWrites = {};
descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[0].dstSet = descriptorSets[i];
descriptorWrites[0].dstBinding = 0;
descriptorWrites[0].dstArrayElement = 0;
descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrites[0].descriptorCount = 1;
descriptorWrites[0].pBufferInfo = &bufferInfo;
descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[1].dstSet = descriptorSets[i];
descriptorWrites[1].dstBinding = 1;
descriptorWrites[1].dstArrayElement = 0;
descriptorWrites[1].descriptorType =
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptorWrites[1].descriptorCount = 1;
descriptorWrites[1].pImageInfo = &imageInfo;
vkUpdateDescriptorSets(device,
static_cast<uint32_t>(descriptorWrites.size()),
descriptorWrites.data(), 0, nullptr);
}
}
void createBuffer(VkDeviceSize size, VkBufferUsageFlags usage,
VkMemoryPropertyFlags properties, VkBuffer& buffer,
VkDeviceMemory& bufferMemory) {
VkBufferCreateInfo bufferInfo = {};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.size = size;
bufferInfo.usage = usage;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
throw std::runtime_error("failed to create buffer!");
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
if (vkAllocateMemory(device, &allocInfo, nullptr, &bufferMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate buffer memory!");
}
vkBindBufferMemory(device, buffer, bufferMemory, 0);
}
VkCommandBuffer beginSingleTimeCommands() {
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandPool = commandPool;
allocInfo.commandBufferCount = 1;
VkCommandBuffer commandBuffer;
vkAllocateCommandBuffers(device, &allocInfo, &commandBuffer);
VkCommandBufferBeginInfo beginInfo = {};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(commandBuffer, &beginInfo);
return commandBuffer;
}
void endSingleTimeCommands(VkCommandBuffer commandBuffer) {
vkEndCommandBuffer(commandBuffer);
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffer;
vkQueueSubmit(graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
vkQueueWaitIdle(graphicsQueue);
vkFreeCommandBuffers(device, commandPool, 1, &commandBuffer);
}
void copyBuffer(VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size) {
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkBufferCopy copyRegion = {};
copyRegion.size = size;
vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, 1, ©Region);
endSingleTimeCommands(commandBuffer);
}
uint32_t findMemoryType(uint32_t typeFilter,
VkMemoryPropertyFlags properties) {
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) {
if ((typeFilter & (1 << i)) &&
(memProperties.memoryTypes[i].propertyFlags & properties) ==
properties) {
return i;
}
}
throw std::runtime_error("failed to find suitable memory type!");
}
void createCommandBuffers() {
commandBuffers.resize(swapChainFramebuffers.size());
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.commandPool = commandPool;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandBufferCount = (uint32_t)commandBuffers.size();
if (vkAllocateCommandBuffers(device, &allocInfo, commandBuffers.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate command buffers!");
}
for (size_t i = 0; i < commandBuffers.size(); i++) {
VkCommandBufferBeginInfo beginInfo = {};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
if (vkBeginCommandBuffer(commandBuffers[i], &beginInfo) != VK_SUCCESS) {
throw std::runtime_error("failed to begin recording command buffer!");
}
VkRenderPassBeginInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = renderPass;
renderPassInfo.framebuffer = swapChainFramebuffers[i];
renderPassInfo.renderArea.offset = {0, 0};
renderPassInfo.renderArea.extent = swapChainExtent;
VkClearValue clearColor = {0.0f, 0.0f, 0.0f, 1.0f};
renderPassInfo.clearValueCount = 1;
renderPassInfo.pClearValues = &clearColor;
vkCmdBeginRenderPass(commandBuffers[i], &renderPassInfo,
VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS,
graphicsPipeline);
VkBuffer vertexBuffers[] = {vertexBuffer};
VkDeviceSize offsets[] = {0};
vkCmdBindVertexBuffers(commandBuffers[i], 0, 1, vertexBuffers, offsets);
vkCmdBindIndexBuffer(commandBuffers[i], indexBuffer, 0,
VK_INDEX_TYPE_UINT16);
vkCmdBindDescriptorSets(commandBuffers[i],
VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
0, 1, &descriptorSets[i], 0, nullptr);
vkCmdDrawIndexed(commandBuffers[i], static_cast<uint32_t>(indices.size()),
1, 0, 0, 0);
// vkCmdDraw(commandBuffers[i], static_cast<uint32_t>(vertices.size()), 1,
// 0, 0);
vkCmdEndRenderPass(commandBuffers[i]);
if (vkEndCommandBuffer(commandBuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to record command buffer!");
}
}
}
void createSyncObjects() {
imageAvailableSemaphores.resize(MAX_FRAMES);
renderFinishedSemaphores.resize(MAX_FRAMES);
inFlightFences.resize(MAX_FRAMES);
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
for (size_t i = 0; i < MAX_FRAMES; i++) {
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&imageAvailableSemaphores[i]) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&renderFinishedSemaphores[i]) != VK_SUCCESS ||
vkCreateFence(device, &fenceInfo, nullptr, &inFlightFences[i]) !=
VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a frame!");
}
}
}
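  // Create the two CUDA-shared semaphores (cudaUpdateVkSemaphore /
  // vkUpdateCudaSemaphore), exported as opaque FDs on Linux or Win32 handles
  // on Windows.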
void createSyncObjectsExt() {
    VkSemaphoreCreateInfo semaphoreInfo = {};
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportSemaphoreWin32HandleInfoKHR
vulkanExportSemaphoreWin32HandleInfoKHR = {};
vulkanExportSemaphoreWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
vulkanExportSemaphoreWin32HandleInfoKHR.pNext = NULL;
vulkanExportSemaphoreWin32HandleInfoKHR.pAttributes =
&winSecurityAttributes;
vulkanExportSemaphoreWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportSemaphoreWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportSemaphoreCreateInfoKHR vulkanExportSemaphoreCreateInfo = {};
vulkanExportSemaphoreCreateInfo.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
#ifdef _WIN64
vulkanExportSemaphoreCreateInfo.pNext =
IsWindows8OrGreater() ? &vulkanExportSemaphoreWin32HandleInfoKHR : NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
vulkanExportSemaphoreCreateInfo.pNext = NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
#endif
semaphoreInfo.pNext = &vulkanExportSemaphoreCreateInfo;
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&cudaUpdateVkSemaphore) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&vkUpdateCudaSemaphore) != VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a CUDA-Vulkan!");
}
}
void updateUniformBuffer() {
UniformBufferObject ubo = {};
mat4x4_identity(ubo.model);
mat4x4 Model;
mat4x4_dup(Model, ubo.model);
mat4x4_rotate(ubo.model, Model, 0.0f, 0.0f, 1.0f, degreesToRadians(135.0f));
vec3 eye = {2.0f, 2.0f, 2.0f};
vec3 center = {0.0f, 0.0f, 0.0f};
vec3 up = {0.0f, 0.0f, 1.0f};
mat4x4_look_at(ubo.view, eye, center, up);
mat4x4_perspective(ubo.proj, degreesToRadians(45.0f),
swapChainExtent.width / (float)swapChainExtent.height,
0.1f, 10.0f);
ubo.proj[1][1] *= -1;
for (size_t i = 0; i < swapChainImages.size(); i++) {
void* data;
vkMapMemory(device, uniformBuffersMemory[i], 0, sizeof(ubo), 0, &data);
memcpy(data, &ubo, sizeof(ubo));
vkUnmapMemory(device, uniformBuffersMemory[i]);
}
}
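  // Per-frame driver: acquire a swap-chain image, submit rendering (plain
  // Vulkan for the first frame, with CUDA interop semaphores afterwards),
  // present, then let CUDA filter the image for the next frame.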
void drawFrame() {
static int startSubmit = 0;
vkWaitForFences(device, 1, &inFlightFences[currentFrame], VK_TRUE,
std::numeric_limits<uint64_t>::max());
uint32_t imageIndex;
VkResult result = vkAcquireNextImageKHR(
device, swapChain, std::numeric_limits<uint64_t>::max(),
imageAvailableSemaphores[currentFrame], VK_NULL_HANDLE, &imageIndex);
if (result == VK_ERROR_OUT_OF_DATE_KHR) {
recreateSwapChain();
return;
} else if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
throw std::runtime_error("failed to acquire swap chain image!");
}
vkResetFences(device, 1, &inFlightFences[currentFrame]);
if (!startSubmit) {
submitVulkan(imageIndex);
startSubmit = 1;
} else {
submitVulkanCuda(imageIndex);
}
VkPresentInfoKHR presentInfo = {};
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame]};
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = signalSemaphores;
VkSwapchainKHR swapChains[] = {swapChain};
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = swapChains;
presentInfo.pImageIndices = &imageIndex;
presentInfo.pResults = nullptr; // Optional
result = vkQueuePresentKHR(presentQueue, &presentInfo);
if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR ||
framebufferResized) {
framebufferResized = false;
recreateSwapChain();
} else if (result != VK_SUCCESS) {
throw std::runtime_error("failed to present swap chain image!");
}
cudaUpdateVkImage();
currentFrame = (currentFrame + 1) % MAX_FRAMES;
// Added sleep of 10 millisecs so that CPU does not submit too much work to
// GPU
std::this_thread::sleep_for(std::chrono::microseconds(10000));
char title[256];
sprintf(title, "Vulkan Image CUDA Box Filter (radius=%d)", filter_radius);
glfwSetWindowTitle(window, title);
}
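  // Signal an external (Vulkan-owned) semaphore from the CUDA stream.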
void cudaVkSemaphoreSignal(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreSignalParams extSemaphoreSignalParams;
memset(&extSemaphoreSignalParams, 0, sizeof(extSemaphoreSignalParams));
extSemaphoreSignalParams.params.fence.value = 0;
extSemaphoreSignalParams.flags = 0;
checkCudaErrors(cudaSignalExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreSignalParams, 1, streamToRun));
}
void cudaVkSemaphoreWait(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreWaitParams extSemaphoreWaitParams;
memset(&extSemaphoreWaitParams, 0, sizeof(extSemaphoreWaitParams));
extSemaphoreWaitParams.params.fence.value = 0;
extSemaphoreWaitParams.flags = 0;
checkCudaErrors(cudaWaitExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreWaitParams, 1, streamToRun));
}
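  // First-frame submit: wait only on image acquisition and signal both the
  // render-finished and the Vulkan-to-CUDA semaphores.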
void submitVulkan(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphores[currentFrame]};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame],
vkUpdateCudaSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, inFlightFences[currentFrame]) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
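  // Steady-state submit: also wait on the CUDA-to-Vulkan semaphore so
  // rendering starts only after CUDA has finished updating the image.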
void submitVulkanCuda(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphores[currentFrame],
cudaUpdateVkSemaphore};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
submitInfo.waitSemaphoreCount = 2;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame],
vkUpdateCudaSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, inFlightFences[currentFrame]) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
VkShaderModule createShaderModule(const std::vector<char>& code) {
VkShaderModuleCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
createInfo.codeSize = code.size();
createInfo.pCode = reinterpret_cast<const uint32_t*>(code.data());
VkShaderModule shaderModule;
if (vkCreateShaderModule(device, &createInfo, nullptr, &shaderModule) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create shader module!");
}
return shaderModule;
}
VkSurfaceFormatKHR chooseSwapSurfaceFormat(
const std::vector<VkSurfaceFormatKHR>& availableFormats) {
if (availableFormats.size() == 1 &&
availableFormats[0].format == VK_FORMAT_UNDEFINED) {
return {VK_FORMAT_B8G8R8A8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR};
}
for (const auto& availableFormat : availableFormats) {
if (availableFormat.format == VK_FORMAT_B8G8R8A8_UNORM &&
availableFormat.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
return availableFormat;
}
}
return availableFormats[0];
}
VkPresentModeKHR chooseSwapPresentMode(
const std::vector<VkPresentModeKHR>& availablePresentModes) {
VkPresentModeKHR bestMode = VK_PRESENT_MODE_FIFO_KHR;
for (const auto& availablePresentMode : availablePresentModes) {
if (availablePresentMode == VK_PRESENT_MODE_MAILBOX_KHR) {
return availablePresentMode;
} else if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
bestMode = availablePresentMode;
}
}
return bestMode;
}
VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities) {
if (capabilities.currentExtent.width !=
std::numeric_limits<uint32_t>::max()) {
return capabilities.currentExtent;
} else {
int width, height;
glfwGetFramebufferSize(window, &width, &height);
VkExtent2D actualExtent = {static_cast<uint32_t>(width),
static_cast<uint32_t>(height)};
actualExtent.width = std::max(
capabilities.minImageExtent.width,
std::min(capabilities.maxImageExtent.width, actualExtent.width));
actualExtent.height = std::max(
capabilities.minImageExtent.height,
std::min(capabilities.maxImageExtent.height, actualExtent.height));
return actualExtent;
}
}
SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device) {
SwapChainSupportDetails details;
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface,
&details.capabilities);
uint32_t formatCount;
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
nullptr);
if (formatCount != 0) {
details.formats.resize(formatCount);
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
details.formats.data());
}
uint32_t presentModeCount;
vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface,
&presentModeCount, nullptr);
if (presentModeCount != 0) {
details.presentModes.resize(presentModeCount);
vkGetPhysicalDeviceSurfacePresentModesKHR(
device, surface, &presentModeCount, details.presentModes.data());
}
return details;
}
bool isDeviceSuitable(VkPhysicalDevice device) {
QueueFamilyIndices indices = findQueueFamilies(device);
bool extensionsSupported = checkDeviceExtensionSupport(device);
bool swapChainAdequate = false;
if (extensionsSupported) {
SwapChainSupportDetails swapChainSupport = querySwapChainSupport(device);
swapChainAdequate = !swapChainSupport.formats.empty() &&
!swapChainSupport.presentModes.empty();
}
VkPhysicalDeviceFeatures supportedFeatures;
vkGetPhysicalDeviceFeatures(device, &supportedFeatures);
return indices.isComplete() && extensionsSupported && swapChainAdequate &&
supportedFeatures.samplerAnisotropy;
}
bool checkDeviceExtensionSupport(VkPhysicalDevice device) {
uint32_t extensionCount;
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
nullptr);
std::vector<VkExtensionProperties> availableExtensions(extensionCount);
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
availableExtensions.data());
std::set<std::string> requiredExtensions(deviceExtensions.begin(),
deviceExtensions.end());
for (const auto& extension : availableExtensions) {
requiredExtensions.erase(extension.extensionName);
}
return requiredExtensions.empty();
}
QueueFamilyIndices findQueueFamilies(VkPhysicalDevice device) {
QueueFamilyIndices indices;
uint32_t queueFamilyCount = 0;
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
nullptr);
std::vector<VkQueueFamilyProperties> queueFamilies(queueFamilyCount);
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
queueFamilies.data());
int i = 0;
for (const auto& queueFamily : queueFamilies) {
if (queueFamily.queueCount > 0 &&
queueFamily.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
indices.graphicsFamily = i;
}
VkBool32 presentSupport = false;
vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &presentSupport);
if (queueFamily.queueCount > 0 && presentSupport) {
indices.presentFamily = i;
}
if (indices.isComplete()) {
break;
}
i++;
}
return indices;
}
std::vector<const char*> getRequiredExtensions() {
uint32_t glfwExtensionCount = 0;
const char** glfwExtensions;
glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);
std::vector<const char*> extensions(glfwExtensions,
glfwExtensions + glfwExtensionCount);
if (enableValidationLayers) {
extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
}
return extensions;
}
bool checkValidationLayerSupport() {
uint32_t layerCount;
vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
std::vector<VkLayerProperties> availableLayers(layerCount);
vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data());
for (const char* layerName : validationLayers) {
bool layerFound = false;
for (const auto& layerProperties : availableLayers) {
if (strcmp(layerName, layerProperties.layerName) == 0) {
layerFound = true;
break;
}
}
if (!layerFound) {
return false;
}
}
return true;
}
static std::vector<char> readFile(const std::string& filename) {
char* file_path = sdkFindFilePath(filename.c_str(), execution_path.c_str());
std::ifstream file(file_path, std::ios::ate | std::ios::binary);
if (!file.is_open()) {
throw std::runtime_error("failed to open file!");
}
size_t fileSize = (size_t)file.tellg();
std::vector<char> buffer(fileSize);
file.seekg(0);
file.read(buffer.data(), fileSize);
file.close();
return buffer;
}
static VKAPI_ATTR VkBool32 VKAPI_CALL
debugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageType,
const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
void* pUserData) {
std::cerr << "validation layer: " << pCallbackData->pMessage << std::endl;
return VK_FALSE;
}
};
int main(int argc, char** argv) {
execution_path = argv[0];
  std::string image_filename = "lenaRGB.ppm";
  if (checkCmdLineFlag(argc, (const char**)argv, "file")) {
    // Read the option into a char* first; casting &std::string to char** is
    // undefined behavior.
    char* image_path = nullptr;
    getCmdLineArgumentString(argc, (const char**)argv, "file", &image_path);
    if (image_path) {
      image_filename = image_path;
    }
  }
vulkanImageCUDA app;
try {
// This app only works on ppm images
app.loadImageData(image_filename);
app.run();
} catch (const std::exception& e) {
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
|
cd63ab2603244e6640f006b0374d0c21abf08866.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ float tanh_(float x)
{
// e**2x - 1
// ---------
// e**2x + 1
float exp2x = exp(2.0*x);
return (exp2x - 1.0)/(exp2x + 1.0);
}
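// One thread per hidden unit (256 per time step): gates the candidate input
// tanh(layer1) with g_i, carries the previous cell state with the forget gate
// (1 - g_i), writes the new cell state to lstm1 and the gated tanh output back
// into layer1.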
__global__ void LSTM1(float* layer1, float* lstm1, const float* gate1i, const float* gate1o, const int offset)
{
int i = blockDim.x*blockIdx.x + threadIdx.x; //256
float g_i = gate1i[256*offset + i];
float g_f = 1.0 - g_i;
float g_o = gate1o[256*offset + i];
float i_t = tanh_(layer1[256*offset + i]) * g_i;
float i_p = 0.0;
if (offset > 0)
i_p = g_f * lstm1[256*(offset-1) + i];
float sum = i_p + i_t;
lstm1[256*offset + i] = sum;
layer1[256*offset + i] = tanh_(sum) * g_o;
} | cd63ab2603244e6640f006b0374d0c21abf08866.cu | #include "includes.h"
__device__ float tanh_(float x)
{
// e**2x - 1
// ---------
// e**2x + 1
float exp2x = exp(2.0*x);
return (exp2x - 1.0)/(exp2x + 1.0);
}
__global__ void LSTM1(float* layer1, float* lstm1, const float* gate1i, const float* gate1o, const int offset)
{
int i = blockDim.x*blockIdx.x + threadIdx.x; //256
float g_i = gate1i[256*offset + i];
float g_f = 1.0 - g_i;
float g_o = gate1o[256*offset + i];
float i_t = tanh_(layer1[256*offset + i]) * g_i;
float i_p = 0.0;
if (offset > 0)
i_p = g_f * lstm1[256*(offset-1) + i];
float sum = i_p + i_t;
lstm1[256*offset + i] = sum;
layer1[256*offset + i] = tanh_(sum) * g_o;
} |
6f16cc66fe40208d853e20251374008675c463a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <chrono>
// Input size
int const BATCH = 1; //Must be 1 in this program
int const DEPTH = 3;
int const WIDTH = 1280;
int const LENGTH = 1280;
// Kernel characteristics
int const ZPADX = 0;
int const ZPADY = 0;
int const STRIDEX = 1;
int const STRIDEY = 1;
int const CONV_RECP_SIZEX = 3;
int const CONV_RECP_SIZEY = 3;
int const NUM_OF_KERNELS = 128;
// Convolution output characteristics
int const convLayerSizeX = ((WIDTH - CONV_RECP_SIZEX + 2 * ZPADX) / STRIDEX + 1);
int const convLayerSizeY = ((LENGTH - CONV_RECP_SIZEY + 2 * ZPADY) / STRIDEY + 1);
// transformation matrix characteristics
int const transformSizeY = convLayerSizeY * convLayerSizeX;
int const transformSizeX = CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH;
int const transformSizeX_nodepth = CONV_RECP_SIZEX * CONV_RECP_SIZEY;
int const CONV_FINAL_SIZE = convLayerSizeX * convLayerSizeY * NUM_OF_KERNELS;
//MASK :2X2
int const MASKX = 3;
int const MASKY = 3;
int const MASKPADX = convLayerSizeX % MASKX;
int const MASKPADY = convLayerSizeY % MASKY;
int const MASK_SIZE = MASKX * MASKY;
int const pseudo_transformSizeY = (convLayerSizeY + MASKPADY) * (convLayerSizeX + MASKPADX);
int const MASKS_IN_Y = convLayerSizeY / (MASKY + MASKPADY);
int const MASKS_IN_X = convLayerSizeX / (MASKX + MASKPADX);
#define COUT_input if (1) std::cout
#define COUT_result if (1) std::cout
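// im2col-style convolution: each block covers one MASKX x MASKY tile of output
// positions; each thread gathers one receptive-field element into
// reducedMatrix and atomically accumulates its contribution to every kernel's
// output. Only the diagonal mask positions are evaluated here (the second
// branch is disabled by '&& 0').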
__global__
void Convolution(float* inputMatrix, float* reducedMatrix, float* weights, float* result)
{
//__shared__ int flag;
int Y = blockIdx.x * MASK_SIZE + (threadIdx.x / transformSizeX);
int X = threadIdx.x % transformSizeX;
int maskX_offset = Y % MASKX;
int maskY_offset = (Y % MASK_SIZE) / MASKX;
if ((maskX_offset == maskY_offset))
{
int MaskNum = (Y / MASK_SIZE);
int convXNew = (MaskNum % MASKS_IN_X) * MASKX + maskX_offset;
int convYNew = (MaskNum / MASKS_IN_X) * MASKY + maskY_offset;
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
if (convXNew >= convLayerSizeX || convYNew >= convLayerSizeY)
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
else if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1)))
{
reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX];
}
else
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
float reduced_value = reducedMatrix[Y * transformSizeX + X];
for (int j = 0; j < NUM_OF_KERNELS; j++)
{
atomicAdd(result + (j * convLayerSizeX * convLayerSizeY + convYNew * convLayerSizeX + convXNew), reduced_value * weights[j * transformSizeX + X]);
}
}
__syncthreads();
if (!(maskX_offset == maskY_offset) && 0)
{
int MaskNum = (Y / MASK_SIZE);
int convXNew = (MaskNum % MASKS_IN_X) * MASKX + maskX_offset;
int convYNew = (MaskNum / MASKS_IN_X) * MASKY + maskY_offset;
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
if (convXNew >= convLayerSizeX || convYNew >= convLayerSizeY)
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
else if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1)))
{
reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX];
}
else
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
float reduced_value = reducedMatrix[Y * transformSizeX + X];
for (int j = 0; j < NUM_OF_KERNELS; j++)
{
atomicAdd(result + (j * convLayerSizeX * convLayerSizeY + convYNew * convLayerSizeX + convXNew), reduced_value * weights[j * transformSizeX + X]);
}
}
/*
int Y = blockIdx.x * MASK_SIZE + (threadIdx.x / transformSizeX);
int X = threadIdx.x % transformSizeX;
int Z = blockIdx.x / blocks_number;
printf("indx= %d\n", blockIdx.x);
int maskX_offset = Y % MASKX;
int maskY_offset = (Y % MASK_SIZE) / MASKX;
if (!(maskX_offset == maskY_offset))
{
int MaskNum = (Y / MASK_SIZE);
int masksX = (MaskNum % MASKS_IN_X) * MASKX;
int masksY = (MaskNum / MASKS_IN_X) * MASKY;
int convXNew = masksX + maskX_offset;
int convYNew = masksY + maskY_offset;
__syncthreads();
if(result[Z * convLayerSizeX * convLayerSizeY + masksY * convLayerSizeX + masksX] != 0 &&
result[Z * convLayerSizeX * convLayerSizeY + (convYNew + 1) * convLayerSizeX + masksX + 1] != 0 &&
result[Z * convLayerSizeX * convLayerSizeY + (convYNew + 2) * convLayerSizeX + masksX + 2] != 0 )
{
if (Z == 0)
{
printf("z= %d\n", Z);
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
if (convXNew >= convLayerSizeX || convYNew >= convLayerSizeY)
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
else if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1)))
{
reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX];
}
else
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
}
float reduced_value = reducedMatrix[Y * transformSizeX + X];
atomicAdd(result + (Z * convLayerSizeX * convLayerSizeY + convYNew * convLayerSizeX + convXNew), reduced_value * weights[Z * transformSizeX + X]);
}
}
else
{
int MaskNum = (Y / MASK_SIZE);
int convXNew = (MaskNum % MASKS_IN_X) * MASKX + maskX_offset;
int convYNew = (MaskNum / MASKS_IN_X) * MASKY + maskY_offset;
if (Z == 0)
{
printf("z= %d\n", Z);
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
if (convXNew >= convLayerSizeX || convYNew >= convLayerSizeY)
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
else if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1)))
{
reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX];
}
else
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
}
float reduced_value = reducedMatrix[Y * transformSizeX + X];
atomicAdd(result + (Z * convLayerSizeX * convLayerSizeY + convYNew * convLayerSizeX + convXNew), reduced_value * weights[Z * transformSizeX + X]);
__syncthreads();
}*/
}
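// Fills a flattened 4-D tensor (d x z x y x x) with random values (type == -1),
// a constant (type == 0) or an increasing ramp, printing each slice as it goes.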
void generateFlat4DData(float* matrix, int x, int y, int z, int d, double type, double jump)
{
double w = jump;
for (int b = 0; b < d; b++)
{
for (int c = 0; c < z; c++)
{
COUT_input << "slice: " << c + 1 << "\n";
for (int j = 0; j < y; j++)
{
for (int i = 0; i < x; i++)
{
if (type == -1)
{
matrix[((b * z + c) * y + j) * x + i] = rand() % 10;
}
else if (type == 0)
{
matrix[((b * z + c) * y + j) * x + i] = jump;
}
else
{
matrix[((b * z + c) * y + j) * x + i] = w;
w += jump;
}
COUT_input << std::setprecision(1) << std::fixed << matrix[((b * z + c) * y + j) * x + i] << " , ";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
}
int main()
{
// Performance test variables
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipError_t cudaStatus;
// Initialize Host data, kernel and output
float* hostInputMatrix = new float[BATCH * DEPTH * LENGTH * WIDTH];
float* hostTransformedInput = new float[pseudo_transformSizeY * transformSizeX]();
float* hostConvResult = new float[CONV_FINAL_SIZE]();
float* hostConvLayerWeights = new float[NUM_OF_KERNELS * DEPTH * CONV_RECP_SIZEY * CONV_RECP_SIZEX];
// GENERATING INPUT
std::cout << "Inputs:\n";
generateFlat4DData(hostInputMatrix, WIDTH, LENGTH, DEPTH, BATCH, 1, 0.1);
generateFlat4DData(hostConvLayerWeights, CONV_RECP_SIZEX, CONV_RECP_SIZEY, DEPTH, NUM_OF_KERNELS, 1, 0.1);
// Initializing and allocating Device data, kernels and output
float* deviceInputMatrix;
float* deviceTransformedInput;
float* deviceConvLayerWeights;
float* deviceConvResult;
cudaStatus = hipMalloc((void **)&deviceConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void **)&deviceInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void **)&deviceTransformedInput, (pseudo_transformSizeY * transformSizeX) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void **)&deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(deviceInputMatrix, hostInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(deviceConvLayerWeights, hostConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Initializing sizes of grid and block of threads
dim3 threadsPerBlock(transformSizeX * MASK_SIZE, 1);
dim3 blocksPerGrid(ceil(double(pseudo_transformSizeY) / double(MASK_SIZE)), 1);
/*
if (transformSizeY * transformSizeX > 1024) {
threadsPerBlock.x = transformSizeX;
threadsPerBlock.y = 1;//1024 / transformSizeX;
blocksPerGrid.x = ceil(double(transformSizeX) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(transformSizeY) / double(threadsPerBlock.y));
}
*/
	// Run the kernel function and measure time
hipEventRecord(start, 0);
Convolution << < blocksPerGrid, threadsPerBlock >> > (deviceInputMatrix, deviceTransformedInput, deviceConvLayerWeights, deviceConvResult);
	cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Transform addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipEventRecord(stop, 0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "EventRecord failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipEventSynchronize(stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "EventSynchronize failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipEventElapsedTime(&time, start, stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "ElapsedTime failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
time = time * 1000;
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "DeviceSynchronize failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// Get the results from device
cudaStatus = hipMemcpy(hostTransformedInput, deviceTransformedInput, (transformSizeX * pseudo_transformSizeY) * sizeof(float), hipMemcpyDeviceToHost); // Not relevant to this program
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(hostConvResult, deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float), hipMemcpyDeviceToHost); // Not relevant to this program
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// PRINTING RESULTS
COUT_input << "Transformed matrix:\n";
for (int k = 0; k < pseudo_transformSizeY; k++)
{
for (int j = 0; j < transformSizeX; j++)
{
COUT_input << std::setprecision(1) << std::fixed << hostTransformedInput[k * transformSizeX + j] << " ";
}
COUT_input << "\n";
}
COUT_input << "Convolution result:\n";
for (int k = 0; k < CONV_FINAL_SIZE; k++)
{
if (k % convLayerSizeX == 0)
{
COUT_input << "\n";
}
if (k % (convLayerSizeX * convLayerSizeY) == 0)
{
COUT_input << "Depth = " << k / (convLayerSizeX * convLayerSizeY) << "\n";
}
COUT_input << std::setprecision(1) << std::fixed << hostConvResult[k] << " ";
}
COUT_input << "\n\n";
// CLEAN UP
printf("Time for Convolution: %f us\n", time);
Error:
	hipFree(deviceInputMatrix);
	hipFree(deviceTransformedInput);
	hipFree(deviceConvLayerWeights);
	hipFree(deviceConvResult);
return 0;
}
| 6f16cc66fe40208d853e20251374008675c463a5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <chrono>
// Input size
int const BATCH = 1; //Must be 1 in this program
int const DEPTH = 3;
int const WIDTH = 1280;
int const LENGTH = 1280;
// Kernel characteristics
int const ZPADX = 0;
int const ZPADY = 0;
int const STRIDEX = 1;
int const STRIDEY = 1;
int const CONV_RECP_SIZEX = 3;
int const CONV_RECP_SIZEY = 3;
int const NUM_OF_KERNELS = 128;
// Convolution output characteristics
int const convLayerSizeX = ((WIDTH - CONV_RECP_SIZEX + 2 * ZPADX) / STRIDEX + 1);
int const convLayerSizeY = ((LENGTH - CONV_RECP_SIZEY + 2 * ZPADY) / STRIDEY + 1);
// transformation matrix characteristics
int const transformSizeY = convLayerSizeY * convLayerSizeX;
int const transformSizeX = CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH;
int const transformSizeX_nodepth = CONV_RECP_SIZEX * CONV_RECP_SIZEY;
int const CONV_FINAL_SIZE = convLayerSizeX * convLayerSizeY * NUM_OF_KERNELS;
//MASK :2X2
int const MASKX = 3;
int const MASKY = 3;
int const MASKPADX = convLayerSizeX % MASKX;
int const MASKPADY = convLayerSizeY % MASKY;
int const MASK_SIZE = MASKX * MASKY;
int const pseudo_transformSizeY = (convLayerSizeY + MASKPADY) * (convLayerSizeX + MASKPADX);
int const MASKS_IN_Y = convLayerSizeY / (MASKY + MASKPADY);
int const MASKS_IN_X = convLayerSizeX / (MASKX + MASKPADX);
#define COUT_input if (1) std::cout
#define COUT_result if (1) std::cout
__global__
void Convolution(float* inputMatrix, float* reducedMatrix, float* weights, float* result)
{
//__shared__ int flag;
int Y = blockIdx.x * MASK_SIZE + (threadIdx.x / transformSizeX);
int X = threadIdx.x % transformSizeX;
int maskX_offset = Y % MASKX;
int maskY_offset = (Y % MASK_SIZE) / MASKX;
if ((maskX_offset == maskY_offset))
{
int MaskNum = (Y / MASK_SIZE);
int convXNew = (MaskNum % MASKS_IN_X) * MASKX + maskX_offset;
int convYNew = (MaskNum / MASKS_IN_X) * MASKY + maskY_offset;
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
if (convXNew >= convLayerSizeX || convYNew >= convLayerSizeY)
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
else if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1)))
{
reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX];
}
else
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
float reduced_value = reducedMatrix[Y * transformSizeX + X];
for (int j = 0; j < NUM_OF_KERNELS; j++)
{
atomicAdd(result + (j * convLayerSizeX * convLayerSizeY + convYNew * convLayerSizeX + convXNew), reduced_value * weights[j * transformSizeX + X]);
}
}
__syncthreads();
if (!(maskX_offset == maskY_offset) && 0)
{
int MaskNum = (Y / MASK_SIZE);
int convXNew = (MaskNum % MASKS_IN_X) * MASKX + maskX_offset;
int convYNew = (MaskNum / MASKS_IN_X) * MASKY + maskY_offset;
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
if (convXNew >= convLayerSizeX || convYNew >= convLayerSizeY)
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
else if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1)))
{
reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX];
}
else
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
float reduced_value = reducedMatrix[Y * transformSizeX + X];
for (int j = 0; j < NUM_OF_KERNELS; j++)
{
atomicAdd(result + (j * convLayerSizeX * convLayerSizeY + convYNew * convLayerSizeX + convXNew), reduced_value * weights[j * transformSizeX + X]);
}
}
/*
int Y = blockIdx.x * MASK_SIZE + (threadIdx.x / transformSizeX);
int X = threadIdx.x % transformSizeX;
int Z = blockIdx.x / blocks_number;
printf("indx= %d\n", blockIdx.x);
int maskX_offset = Y % MASKX;
int maskY_offset = (Y % MASK_SIZE) / MASKX;
if (!(maskX_offset == maskY_offset))
{
int MaskNum = (Y / MASK_SIZE);
int masksX = (MaskNum % MASKS_IN_X) * MASKX;
int masksY = (MaskNum / MASKS_IN_X) * MASKY;
int convXNew = masksX + maskX_offset;
int convYNew = masksY + maskY_offset;
__syncthreads();
if(result[Z * convLayerSizeX * convLayerSizeY + masksY * convLayerSizeX + masksX] != 0 &&
result[Z * convLayerSizeX * convLayerSizeY + (convYNew + 1) * convLayerSizeX + masksX + 1] != 0 &&
result[Z * convLayerSizeX * convLayerSizeY + (convYNew + 2) * convLayerSizeX + masksX + 2] != 0 )
{
if (Z == 0)
{
printf("z= %d\n", Z);
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
if (convXNew >= convLayerSizeX || convYNew >= convLayerSizeY)
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
else if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1)))
{
reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX];
}
else
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
}
float reduced_value = reducedMatrix[Y * transformSizeX + X];
atomicAdd(result + (Z * convLayerSizeX * convLayerSizeY + convYNew * convLayerSizeX + convXNew), reduced_value * weights[Z * transformSizeX + X]);
}
}
else
{
int MaskNum = (Y / MASK_SIZE);
int convXNew = (MaskNum % MASKS_IN_X) * MASKX + maskX_offset;
int convYNew = (MaskNum / MASKS_IN_X) * MASKY + maskY_offset;
if (Z == 0)
{
printf("z= %d\n", Z);
int inputX = convXNew * STRIDEX + X % CONV_RECP_SIZEY;
int inputY = convYNew * STRIDEY + (X % (transformSizeX_nodepth)) / CONV_RECP_SIZEX;
int inputZ = X / (transformSizeX_nodepth);
if (convXNew >= convLayerSizeX || convYNew >= convLayerSizeY)
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
else if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1)))
{
reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX];
}
else
{
reducedMatrix[(Y * transformSizeX) + X] = 0;
}
}
float reduced_value = reducedMatrix[Y * transformSizeX + X];
atomicAdd(result + (Z * convLayerSizeX * convLayerSizeY + convYNew * convLayerSizeX + convXNew), reduced_value * weights[Z * transformSizeX + X]);
__syncthreads();
}*/
}
void generateFlat4DData(float* matrix, int x, int y, int z, int d, double type, double jump)
{
double w = jump;
for (int b = 0; b < d; b++)
{
for (int c = 0; c < z; c++)
{
COUT_input << "slice: " << c + 1 << "\n";
for (int j = 0; j < y; j++)
{
for (int i = 0; i < x; i++)
{
if (type == -1)
{
matrix[((b * z + c) * y + j) * x + i] = rand() % 10;
}
else if (type == 0)
{
matrix[((b * z + c) * y + j) * x + i] = jump;
}
else
{
matrix[((b * z + c) * y + j) * x + i] = w;
w += jump;
}
COUT_input << std::setprecision(1) << std::fixed << matrix[((b * z + c) * y + j) * x + i] << " , ";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
}
int main()
{
// Performance test variables
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaError_t cudaStatus;
// Initialize Host data, kernel and output
float* hostInputMatrix = new float[BATCH * DEPTH * LENGTH * WIDTH];
float* hostTransformedInput = new float[pseudo_transformSizeY * transformSizeX]();
float* hostConvResult = new float[CONV_FINAL_SIZE]();
float* hostConvLayerWeights = new float[NUM_OF_KERNELS * DEPTH * CONV_RECP_SIZEY * CONV_RECP_SIZEX];
// GENERATING INPUT
std::cout << "Inputs:\n";
generateFlat4DData(hostInputMatrix, WIDTH, LENGTH, DEPTH, BATCH, 1, 0.1);
generateFlat4DData(hostConvLayerWeights, CONV_RECP_SIZEX, CONV_RECP_SIZEY, DEPTH, NUM_OF_KERNELS, 1, 0.1);
// Initializing and allocating Device data, kernels and output
float* deviceInputMatrix;
float* deviceTransformedInput;
float* deviceConvLayerWeights;
float* deviceConvResult;
cudaStatus = cudaMalloc((void **)&deviceConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void **)&deviceInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void **)&deviceTransformedInput, (pseudo_transformSizeY * transformSizeX) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void **)&deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(deviceInputMatrix, hostInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(deviceConvLayerWeights, hostConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Initializing sizes of grid and block of threads
dim3 threadsPerBlock(transformSizeX * MASK_SIZE, 1);
dim3 blocksPerGrid(ceil(double(pseudo_transformSizeY) / double(MASK_SIZE)), 1);
/*
if (transformSizeY * transformSizeX > 1024) {
threadsPerBlock.x = transformSizeX;
threadsPerBlock.y = 1;//1024 / transformSizeX;
blocksPerGrid.x = ceil(double(transformSizeX) / double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(transformSizeY) / double(threadsPerBlock.y));
}
*/
	// Run the kernel function and measure time
cudaEventRecord(start, 0);
Convolution << < blocksPerGrid, threadsPerBlock >> > (deviceInputMatrix, deviceTransformedInput, deviceConvLayerWeights, deviceConvResult);
	cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Transform addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaEventRecord(stop, 0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "EventRecord failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaEventSynchronize(stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "EventSynchronize failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaEventElapsedTime(&time, start, stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "ElapsedTime failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
time = time * 1000;
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "DeviceSynchronize failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// Get the results from device
cudaStatus = cudaMemcpy(hostTransformedInput, deviceTransformedInput, (transformSizeX * pseudo_transformSizeY) * sizeof(float), cudaMemcpyDeviceToHost); // Not relevant to this program
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(hostConvResult, deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float), cudaMemcpyDeviceToHost); // Not relevant to this program
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// PRINTING RESULTS
COUT_input << "Transformed matrix:\n";
for (int k = 0; k < pseudo_transformSizeY; k++)
{
for (int j = 0; j < transformSizeX; j++)
{
COUT_input << std::setprecision(1) << std::fixed << hostTransformedInput[k * transformSizeX + j] << " ";
}
COUT_input << "\n";
}
COUT_input << "Convolution result:\n";
for (int k = 0; k < CONV_FINAL_SIZE; k++)
{
if (k % convLayerSizeX == 0)
{
COUT_input << "\n";
}
if (k % (convLayerSizeX * convLayerSizeY) == 0)
{
COUT_input << "Depth = " << k / (convLayerSizeX * convLayerSizeY) << "\n";
}
COUT_input << std::setprecision(1) << std::fixed << hostConvResult[k] << " ";
}
COUT_input << "\n\n";
// CLEAN UP
printf("Time for Convolution: %f us\n", time);
Error:
	cudaFree(deviceInputMatrix);
	cudaFree(deviceTransformedInput);
	cudaFree(deviceConvLayerWeights);
	cudaFree(deviceConvResult);
return 0;
}
|
75d6b42043b117e916f78eb01e7def292c7f5960.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file nlsm_nuts.cpp
* @author Milinda Fernando ([email protected])
* @brief : NLSM to test with NUTS.
* @version 0.1
* @date 2020-04-03
* @copyright Copyright (c) 2020
*
*/
#include "nlsm.h"
#include "nlsmUtils.h"
#include "mpi.h"
#include "TreeNode.h"
#include "mesh.h"
#include <vector>
#include <iostream>
#include "octUtils.h"
#include "ets.h"
#include "enuts.h"
#include "assert.h"
#include "mathUtils.h"
#include "nlsmCtxGPU.cuh"
int main (int argc, char** argv)
{
// 0- NUTS 1-UTS
unsigned int ts_mode=0;
if(argc<2)
std::cout<<"Usage: "<<argv[0]<<" paramFile"<<std::endl;
if(argc>2)
ts_mode = std::atoi(argv[2]);
MPI_Init(&argc,&argv);
MPI_Comm comm=MPI_COMM_WORLD;
int rank,npes;
MPI_Comm_rank(comm,&rank);
MPI_Comm_size(comm,&npes);
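    // Map MPI ranks to GPUs round-robin across the visible devices.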
int devicesCount;
hipGetDeviceCount(&devicesCount);
if(!rank)
printf("number of cuda devices: %d\n",devicesCount);
hipSetDevice(rank%devicesCount);
if (!rank) {
#ifdef NLSM_NONLINEAR
std::cout<<GRN<<"Compiled with NLSM_NONLINEAR"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_NONLINEAR"<<NRM<<std::endl;
#endif
#ifdef NLSM_COMPARE_WITH_ANALYTICAL_SOL
std::cout<<GRN<<"Compiled with NLSM_COMPARE_WITH_ANALYTICAL_SOL"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_COMPARE_WITH_ANALYTICAL_SOL"<<NRM<<std::endl;
#endif
#ifdef NLSM_USE_4TH_ORDER_DERIVS
std::cout<<GRN<<"Compiled with NLSM_USE_4TH_ORDER_DERIVS"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_USE_4TH_ORDER_DERIVS"<<NRM<<std::endl;
#endif
#ifdef NLSM_USE_6TH_ORDER_DERIVS
std::cout<<GRN<<"Compiled with NLSM_USE_6TH_ORDER_DERIVS"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_USE_6TH_ORDER_DERIVS"<<NRM<<std::endl;
#endif
#ifdef NLSM_USE_8TH_ORDER_DERIVS
std::cout<<GRN<<"Compiled with NLSM_USE_8TH_ORDER_DERIVS"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_USE_8TH_ORDER_DERIVS"<<NRM<<std::endl;
#endif
}
nlsm::timer::initFlops();
nlsm::timer::total_runtime.start();
//1 . read the parameter file.
if(!rank) std::cout<<" reading parameter file :"<<argv[1]<<std::endl;
nlsm::readParamFile(argv[1],comm);
nlsm::dumpParamFile(std::cout,1,comm);
_InitializeHcurve(nlsm::NLSM_DIM);
m_uiMaxDepth=nlsm::NLSM_MAXDEPTH;
if(nlsm::NLSM_NUM_VARS%nlsm::NLSM_ASYNC_COMM_K!=0)
{
if(!rank) std::cout<<"[overlap communication error]: total NLSM_NUM_VARS: "<<nlsm::NLSM_NUM_VARS<<" is not divisable by NLSM_ASYNC_COMM_K: "<<nlsm::NLSM_ASYNC_COMM_K<<std::endl;
exit(0);
}
//2. generate the initial grid.
std::vector<ot::TreeNode> tmpNodes;
std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){nlsm::initData(x,y,z,var);};
std::function<void(double,double,double,double,double*)> u_x_t=[](double x,double y,double z,double t,double*var){nlsm::analyticalSol(x,y,z,t,var);};
//std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){nlsm::KerrSchildData(x,y,z,var);};
const unsigned int interpVars=nlsm::NLSM_NUM_VARS;
unsigned int varIndex[interpVars];
for(unsigned int i=0;i<nlsm::NLSM_NUM_VARS;i++)
varIndex[i]=i;
DendroIntL localSz,globalSz;
double t_stat;
double t_stat_g[3];
if(nlsm::NLSM_ENABLE_BLOCK_ADAPTIVITY)
{
if(!rank) std::cout<<YLW<<"Using block adaptive mesh. AMR disabled "<<NRM<<std::endl;
const Point pt_min(nlsm::NLSM_BLK_MIN_X,nlsm::NLSM_BLK_MIN_Y,nlsm::NLSM_BLK_MIN_Z);
const Point pt_max(nlsm::NLSM_BLK_MAX_X,nlsm::NLSM_BLK_MAX_Y,nlsm::NLSM_BLK_MAX_Z);
nlsm::blockAdaptiveOctree(tmpNodes,pt_min,pt_max,m_uiMaxDepth-(binOp::fastLog2(nlsm::NLSM_ELE_ORDER)),m_uiMaxDepth,comm);
}else
{
if(!rank) std::cout<<YLW<<"Using function2Octree. AMR enabled "<<NRM<<std::endl;
function2Octree(f_init,nlsm::NLSM_NUM_VARS,nlsm::NLSM_REFINE_VARIABLE_INDICES,nlsm::NLSM_NUM_REFINE_VARS,tmpNodes,m_uiMaxDepth,nlsm::NLSM_WAVELET_TOL,nlsm::NLSM_ELE_ORDER,comm);
}
ot::Mesh * mesh = ot::createMesh(tmpNodes.data(),tmpNodes.size(),nlsm::NLSM_ELE_ORDER,comm,1,ot::SM_TYPE::FDM,nlsm::NLSM_DENDRO_GRAIN_SZ,nlsm::NLSM_LOAD_IMB_TOL,nlsm::NLSM_SPLIT_FIX);
mesh->setDomainBounds(Point(nlsm::NLSM_GRID_MIN_X,nlsm::NLSM_GRID_MIN_Y,nlsm::NLSM_GRID_MIN_Z), Point(nlsm::NLSM_GRID_MAX_X, nlsm::NLSM_GRID_MAX_Y,nlsm::NLSM_GRID_MAX_Z));
bool is_mindepth_refine_g = false;
// do{
// if(!rank)
// std::cout<<"enforce min depth refinement currently only works for block AMR for NLSM"<<std::endl;
// bool is_mindepth_refine = false;
// std::vector<unsigned int> refine_flag;
// refine_flag.reserve(mesh->getNumLocalMeshElements());
// const ot::TreeNode* pNodes = mesh->getAllElements().data();
// for(unsigned int ele = mesh->getElementLocalBegin(); ele < mesh->getElementLocalEnd(); ele++)
// {
// if(pNodes[ele].getLevel() < nlsm::NLSM_MINDEPTH)
// {
// refine_flag.push_back(OCT_SPLIT);
// is_mindepth_refine=true;
// }else
// {
// refine_flag.push_back(OCT_NO_CHANGE);
// }
// }
// MPI_Allreduce(&is_mindepth_refine,&is_mindepth_refine_g,1,MPI_C_BOOL,MPI_LOR,comm);
// if(is_mindepth_refine_g){
// mesh->setMeshRefinementFlags(refine_flag);
// ot::Mesh* newMesh = mesh->ReMesh();
// DendroIntL localSz = mesh->getNumLocalMeshElements();
// DendroIntL gSz_new, gSz_old;
// par::Mpi_Reduce(&localSz,&gSz_old,1,MPI_SUM,0,comm);
// localSz = newMesh->getNumLocalMeshElements();
// par::Mpi_Reduce(&localSz,&gSz_new,1,MPI_SUM,0,comm);
// if(!rank)
// std::cout<<"old mesh size: "<<gSz_old<<" new mesh size: "<<gSz_new<<std::endl;
// std::swap(newMesh,mesh);
// delete newMesh;
// }
// }while(is_mindepth_refine_g);
unsigned int lmin, lmax;
mesh->computeMinMaxLevel(lmin,lmax);
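// CFL-style step: the finest grid spacing is (domain length) * 2^-(lmax) divided by the element order,
// and dt below is that spacing scaled by NLSM_CFL_FACTOR.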
nlsm::NLSM_RK45_TIME_STEP_SIZE=nlsm::NLSM_CFL_FACTOR*((nlsm::NLSM_COMPD_MAX[0]-nlsm::NLSM_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) nlsm::NLSM_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))));
par::Mpi_Bcast(&nlsm::NLSM_RK45_TIME_STEP_SIZE,1,0,comm);
DendroIntL lblocks = mesh->getLocalBlockList().size();
DendroIntL gblocks =0;
par::Mpi_Reduce(&lblocks,&gblocks,1,MPI_SUM,0,comm);
if(!rank)
std::cout<<" number of blocks for coarsest block level : "<<(m_uiMaxDepth-MAXDEAPTH_LEVEL_DIFF-1)<<" # blocks: "<<gblocks<<std::endl;
if(!rank)
std::cout<<" lmin: "<<lmin<<" lmax: "<<lmax<<std::endl;
if(!rank)
std::cout<<"ts_mode: "<<ts_mode<<std::endl;
const ts::ETSType tsType = ts::ETSType::RK4;
/*if(ts_mode == 0)
{
nlsm::NLSMCtx * appCtx = new nlsm::NLSMCtx(mesh);
ts::ExplicitNUTS<DendroScalar,nlsm::NLSMCtx>* enuts = new ts::ExplicitNUTS<DendroScalar,nlsm::NLSMCtx>(appCtx);
std::vector<double> ld_stat_g;
enuts->set_evolve_vars(appCtx->get_evolution_vars());
enuts->set_ets_coefficients(tsType);
const unsigned int rank_global = enuts->get_global_rank();
const unsigned int pt_remesh_freq = 5;//(1u<<(lmax-lmin-3))
for(enuts->init(); enuts->curr_time() < nlsm::NLSM_RK45_TIME_END ; enuts->evolve())
//for(enuts->init(); enuts->curr_time() < nlsm::NLSM_RK45_TIME_END ; enuts->evolve_with_remesh(pt_remesh_freq))
{
const DendroIntL step = enuts->curr_step();
const DendroScalar time = enuts->curr_time();
const bool isActive = enuts->is_active();
enuts->dump_load_statistics(std::cout);
if(!rank_global)
std::cout<<GRN<<"[Explicit NUTS]: Executing step : "<<enuts->curr_step()<<std::setw(10)<<"\tcurrent time :"<<enuts->curr_time()<<std::setw(10)<<"\t dt(min):"<<enuts->get_dt_min()<<std::setw(10)<<"\t dt(max):"<<enuts->get_dt_max()<<std::setw(10)<<"\t"<<NRM<<std::endl;
appCtx->terminal_output();
bool isRemesh = false;
if( (step % nlsm::NLSM_REMESH_TEST_FREQ) == 0 )
isRemesh = appCtx->is_remesh();
if(isRemesh)
{
if(!rank_global)
std::cout<<"[Explicit NUTS]: Remesh triggered "<<std::endl;;
DVec eVars = appCtx->get_evolution_vars();
appCtx->remesh_and_gridtransfer(nlsm::NLSM_DENDRO_GRAIN_SZ, nlsm::NLSM_LOAD_IMB_TOL,nlsm::NLSM_SPLIT_FIX);
//appCtx->terminal_output();
enuts->sync_with_mesh();
}
if((step % nlsm::NLSM_IO_OUTPUT_FREQ) == 0 )
appCtx -> write_vtu();
if( (step % nlsm::NLSM_CHECKPT_FREQ) == 0 )
appCtx -> write_checkpt();
//appCtx_ets->dump_pt(std::cout);
//appCtx_enuts->dump_pt(std::cout);
//ets->dump_pt(std::cout);
//enuts->dump_pt(std::cout);
#ifdef __PROFILE_ETS__
char fName[200];
std::ofstream f_ets, f_enuts;
sprintf(fName,"%s_enuts.prof",nlsm::NLSM_PROFILE_FILE_PREFIX.c_str());
if(!rank)
{
f_enuts.open (fName,std::fstream::app);
if(f_enuts.fail()) {std::cout<<fName<<" file open failed "<<std::endl; MPI_Abort(comm,0);}
}
enuts->dump_pt(f_enuts);
enuts->reset_pt();
if(!rank) f_ets.close();
if(!rank) f_enuts.close();
#endif
}
delete appCtx->get_mesh();
delete appCtx;
delete enuts;
}else */
if(ts_mode==1)
{
//UTS
//nlsm::NLSMCtx * appCtx = new nlsm::NLSMCtx(mesh);
nlsm::NLSMCtxGPU * appCtx = new nlsm::NLSMCtxGPU(mesh);
ts::ETS<DendroScalar,nlsm::NLSMCtxGPU>* ets = new ts::ETS<DendroScalar,nlsm::NLSMCtxGPU>(appCtx);
ets->set_evolve_vars(appCtx->get_evolution_vars());
ets->set_ets_coefficients(tsType);
for(ets->init(); ets->curr_time() < nlsm::NLSM_RK45_TIME_END ; ets->evolve())
{
const DendroIntL step = ets->curr_step();
const DendroScalar time = ets->curr_time();
const bool isActive = ets->is_active();
const unsigned int rank_global = ets->get_global_rank();
if( (step % nlsm::NLSM_REMESH_TEST_FREQ) == 0 )
{
appCtx->device_to_host_sync();
bool isRemesh = appCtx->is_remesh();
appCtx->terminal_output();
if(isRemesh)
{
if(!rank_global)
std::cout<<"[ETS] : Remesh is triggered. \n";
appCtx->remesh_and_gridtransfer(nlsm::NLSM_DENDRO_GRAIN_SZ, nlsm::NLSM_LOAD_IMB_TOL,nlsm::NLSM_SPLIT_FIX);
ets->sync_with_mesh();
}
if((step % nlsm::NLSM_IO_OUTPUT_FREQ) == 0 )
appCtx -> write_vtu();
if( (step % nlsm::NLSM_CHECKPT_FREQ) == 0 )
appCtx -> write_checkpt();
}
//appCtx_ets->dump_pt(std::cout);
//appCtx_enuts->dump_pt(std::cout);
//ets->dump_pt(std::cout);
//enuts->dump_pt(std::cout);
#ifdef __PROFILE_ETS__
char fName[200];
std::ofstream f_ets, f_enuts;
sprintf(fName,"%s_ets.prof",nlsm::NLSM_PROFILE_FILE_PREFIX.c_str());
if(!rank)
{
f_ets.open (fName,std::fstream::app);
if(f_ets.fail()) {std::cout<<fName<<" file open failed "<<std::endl; MPI_Abort(comm,0);}
}
ets->dump_pt(f_ets);
ets->reset_pt();
if(!rank) f_ets.close();
if(!rank) f_enuts.close();
#endif
}
delete appCtx->get_mesh();
delete appCtx;
delete ets;
}
MPI_Finalize();
return 0;
}
| 75d6b42043b117e916f78eb01e7def292c7f5960.cu | /**
* @file nlsm_nuts.cpp
* @author Milinda Fernando ([email protected])
* @brief : NLSM to test with NUTS.
* @version 0.1
* @date 2020-04-03
* @copyright Copyright (c) 2020
*
*/
#include "nlsm.h"
#include "nlsmUtils.h"
#include "mpi.h"
#include "TreeNode.h"
#include "mesh.h"
#include <vector>
#include <iostream>
#include "octUtils.h"
#include "ets.h"
#include "enuts.h"
#include "assert.h"
#include "mathUtils.h"
#include "nlsmCtxGPU.cuh"
int main (int argc, char** argv)
{
// 0- NUTS 1-UTS
unsigned int ts_mode=0;
if(argc<2)
std::cout<<"Usage: "<<argv[0]<<" paramFile"<<std::endl;
if(argc>2)
ts_mode = std::atoi(argv[2]);
MPI_Init(&argc,&argv);
MPI_Comm comm=MPI_COMM_WORLD;
int rank,npes;
MPI_Comm_rank(comm,&rank);
MPI_Comm_size(comm,&npes);
int devicesCount;
cudaGetDeviceCount(&devicesCount);
if(!rank)
printf("number of cuda devices: %d\n",devicesCount);
cudaSetDevice(rank%devicesCount);
if (!rank) {
#ifdef NLSM_NONLINEAR
std::cout<<GRN<<"Compiled with NLSM_NONLINEAR"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_NONLINEAR"<<NRM<<std::endl;
#endif
#ifdef NLSM_COMPARE_WITH_ANALYTICAL_SOL
std::cout<<GRN<<"Compiled with NLSM_COMPARE_WITH_ANALYTICAL_SOL"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_COMPARE_WITH_ANALYTICAL_SOL"<<NRM<<std::endl;
#endif
#ifdef NLSM_USE_4TH_ORDER_DERIVS
std::cout<<GRN<<"Compiled with NLSM_USE_4TH_ORDER_DERIVS"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_USE_4TH_ORDER_DERIVS"<<NRM<<std::endl;
#endif
#ifdef NLSM_USE_6TH_ORDER_DERIVS
std::cout<<GRN<<"Compiled with NLSM_USE_6TH_ORDER_DERIVS"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_USE_6TH_ORDER_DERIVS"<<NRM<<std::endl;
#endif
#ifdef NLSM_USE_8TH_ORDER_DERIVS
std::cout<<GRN<<"Compiled with NLSM_USE_8TH_ORDER_DERIVS"<<NRM<<std::endl;
#else
std::cout<<RED<<"Compiled without NLSM_USE_8TH_ORDER_DERIVS"<<NRM<<std::endl;
#endif
}
nlsm::timer::initFlops();
nlsm::timer::total_runtime.start();
//1 . read the parameter file.
if(!rank) std::cout<<" reading parameter file :"<<argv[1]<<std::endl;
nlsm::readParamFile(argv[1],comm);
nlsm::dumpParamFile(std::cout,1,comm);
_InitializeHcurve(nlsm::NLSM_DIM);
m_uiMaxDepth=nlsm::NLSM_MAXDEPTH;
if(nlsm::NLSM_NUM_VARS%nlsm::NLSM_ASYNC_COMM_K!=0)
{
if(!rank) std::cout<<"[overlap communication error]: total NLSM_NUM_VARS: "<<nlsm::NLSM_NUM_VARS<<" is not divisible by NLSM_ASYNC_COMM_K: "<<nlsm::NLSM_ASYNC_COMM_K<<std::endl;
exit(0);
}
//2. generate the initial grid.
std::vector<ot::TreeNode> tmpNodes;
std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){nlsm::initData(x,y,z,var);};
std::function<void(double,double,double,double,double*)> u_x_t=[](double x,double y,double z,double t,double*var){nlsm::analyticalSol(x,y,z,t,var);};
//std::function<void(double,double,double,double*)> f_init=[](double x,double y,double z,double*var){nlsm::KerrSchildData(x,y,z,var);};
const unsigned int interpVars=nlsm::NLSM_NUM_VARS;
unsigned int varIndex[interpVars];
for(unsigned int i=0;i<nlsm::NLSM_NUM_VARS;i++)
varIndex[i]=i;
DendroIntL localSz,globalSz;
double t_stat;
double t_stat_g[3];
if(nlsm::NLSM_ENABLE_BLOCK_ADAPTIVITY)
{
if(!rank) std::cout<<YLW<<"Using block adaptive mesh. AMR disabled "<<NRM<<std::endl;
const Point pt_min(nlsm::NLSM_BLK_MIN_X,nlsm::NLSM_BLK_MIN_Y,nlsm::NLSM_BLK_MIN_Z);
const Point pt_max(nlsm::NLSM_BLK_MAX_X,nlsm::NLSM_BLK_MAX_Y,nlsm::NLSM_BLK_MAX_Z);
nlsm::blockAdaptiveOctree(tmpNodes,pt_min,pt_max,m_uiMaxDepth-(binOp::fastLog2(nlsm::NLSM_ELE_ORDER)),m_uiMaxDepth,comm);
}else
{
if(!rank) std::cout<<YLW<<"Using function2Octree. AMR enabled "<<NRM<<std::endl;
function2Octree(f_init,nlsm::NLSM_NUM_VARS,nlsm::NLSM_REFINE_VARIABLE_INDICES,nlsm::NLSM_NUM_REFINE_VARS,tmpNodes,m_uiMaxDepth,nlsm::NLSM_WAVELET_TOL,nlsm::NLSM_ELE_ORDER,comm);
}
ot::Mesh * mesh = ot::createMesh(tmpNodes.data(),tmpNodes.size(),nlsm::NLSM_ELE_ORDER,comm,1,ot::SM_TYPE::FDM,nlsm::NLSM_DENDRO_GRAIN_SZ,nlsm::NLSM_LOAD_IMB_TOL,nlsm::NLSM_SPLIT_FIX);
mesh->setDomainBounds(Point(nlsm::NLSM_GRID_MIN_X,nlsm::NLSM_GRID_MIN_Y,nlsm::NLSM_GRID_MIN_Z), Point(nlsm::NLSM_GRID_MAX_X, nlsm::NLSM_GRID_MAX_Y,nlsm::NLSM_GRID_MAX_Z));
bool is_mindepth_refine_g = false;
// do{
// if(!rank)
// std::cout<<"enforce min depth refinement currently only works for block AMR for NLSM"<<std::endl;
// bool is_mindepth_refine = false;
// std::vector<unsigned int> refine_flag;
// refine_flag.reserve(mesh->getNumLocalMeshElements());
// const ot::TreeNode* pNodes = mesh->getAllElements().data();
// for(unsigned int ele = mesh->getElementLocalBegin(); ele < mesh->getElementLocalEnd(); ele++)
// {
// if(pNodes[ele].getLevel() < nlsm::NLSM_MINDEPTH)
// {
// refine_flag.push_back(OCT_SPLIT);
// is_mindepth_refine=true;
// }else
// {
// refine_flag.push_back(OCT_NO_CHANGE);
// }
// }
// MPI_Allreduce(&is_mindepth_refine,&is_mindepth_refine_g,1,MPI_C_BOOL,MPI_LOR,comm);
// if(is_mindepth_refine_g){
// mesh->setMeshRefinementFlags(refine_flag);
// ot::Mesh* newMesh = mesh->ReMesh();
// DendroIntL localSz = mesh->getNumLocalMeshElements();
// DendroIntL gSz_new, gSz_old;
// par::Mpi_Reduce(&localSz,&gSz_old,1,MPI_SUM,0,comm);
// localSz = newMesh->getNumLocalMeshElements();
// par::Mpi_Reduce(&localSz,&gSz_new,1,MPI_SUM,0,comm);
// if(!rank)
// std::cout<<"old mesh size: "<<gSz_old<<" new mesh size: "<<gSz_new<<std::endl;
// std::swap(newMesh,mesh);
// delete newMesh;
// }
// }while(is_mindepth_refine_g);
unsigned int lmin, lmax;
mesh->computeMinMaxLevel(lmin,lmax);
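// CFL-style step: the finest grid spacing is (domain length) * 2^-(lmax) divided by the element order,
// and dt below is that spacing scaled by NLSM_CFL_FACTOR.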
nlsm::NLSM_RK45_TIME_STEP_SIZE=nlsm::NLSM_CFL_FACTOR*((nlsm::NLSM_COMPD_MAX[0]-nlsm::NLSM_COMPD_MIN[0])*((1u<<(m_uiMaxDepth-lmax))/((double) nlsm::NLSM_ELE_ORDER))/((double)(1u<<(m_uiMaxDepth))));
par::Mpi_Bcast(&nlsm::NLSM_RK45_TIME_STEP_SIZE,1,0,comm);
DendroIntL lblocks = mesh->getLocalBlockList().size();
DendroIntL gblocks =0;
par::Mpi_Reduce(&lblocks,&gblocks,1,MPI_SUM,0,comm);
if(!rank)
std::cout<<" number of blocks for coarsest block level : "<<(m_uiMaxDepth-MAXDEAPTH_LEVEL_DIFF-1)<<" # blocks: "<<gblocks<<std::endl;
if(!rank)
std::cout<<" lmin: "<<lmin<<" lmax: "<<lmax<<std::endl;
if(!rank)
std::cout<<"ts_mode: "<<ts_mode<<std::endl;
const ts::ETSType tsType = ts::ETSType::RK4;
/*if(ts_mode == 0)
{
nlsm::NLSMCtx * appCtx = new nlsm::NLSMCtx(mesh);
ts::ExplicitNUTS<DendroScalar,nlsm::NLSMCtx>* enuts = new ts::ExplicitNUTS<DendroScalar,nlsm::NLSMCtx>(appCtx);
std::vector<double> ld_stat_g;
enuts->set_evolve_vars(appCtx->get_evolution_vars());
enuts->set_ets_coefficients(tsType);
const unsigned int rank_global = enuts->get_global_rank();
const unsigned int pt_remesh_freq = 5;//(1u<<(lmax-lmin-3))
for(enuts->init(); enuts->curr_time() < nlsm::NLSM_RK45_TIME_END ; enuts->evolve())
//for(enuts->init(); enuts->curr_time() < nlsm::NLSM_RK45_TIME_END ; enuts->evolve_with_remesh(pt_remesh_freq))
{
const DendroIntL step = enuts->curr_step();
const DendroScalar time = enuts->curr_time();
const bool isActive = enuts->is_active();
enuts->dump_load_statistics(std::cout);
if(!rank_global)
std::cout<<GRN<<"[Explicit NUTS]: Executing step : "<<enuts->curr_step()<<std::setw(10)<<"\tcurrent time :"<<enuts->curr_time()<<std::setw(10)<<"\t dt(min):"<<enuts->get_dt_min()<<std::setw(10)<<"\t dt(max):"<<enuts->get_dt_max()<<std::setw(10)<<"\t"<<NRM<<std::endl;
appCtx->terminal_output();
bool isRemesh = false;
if( (step % nlsm::NLSM_REMESH_TEST_FREQ) == 0 )
isRemesh = appCtx->is_remesh();
if(isRemesh)
{
if(!rank_global)
std::cout<<"[Explicit NUTS]: Remesh triggered "<<std::endl;;
DVec eVars = appCtx->get_evolution_vars();
appCtx->remesh_and_gridtransfer(nlsm::NLSM_DENDRO_GRAIN_SZ, nlsm::NLSM_LOAD_IMB_TOL,nlsm::NLSM_SPLIT_FIX);
//appCtx->terminal_output();
enuts->sync_with_mesh();
}
if((step % nlsm::NLSM_IO_OUTPUT_FREQ) == 0 )
appCtx -> write_vtu();
if( (step % nlsm::NLSM_CHECKPT_FREQ) == 0 )
appCtx -> write_checkpt();
//appCtx_ets->dump_pt(std::cout);
//appCtx_enuts->dump_pt(std::cout);
//ets->dump_pt(std::cout);
//enuts->dump_pt(std::cout);
#ifdef __PROFILE_ETS__
char fName[200];
std::ofstream f_ets, f_enuts;
sprintf(fName,"%s_enuts.prof",nlsm::NLSM_PROFILE_FILE_PREFIX.c_str());
if(!rank)
{
f_enuts.open (fName,std::fstream::app);
if(f_enuts.fail()) {std::cout<<fName<<" file open failed "<<std::endl; MPI_Abort(comm,0);}
}
enuts->dump_pt(f_enuts);
enuts->reset_pt();
if(!rank) f_ets.close();
if(!rank) f_enuts.close();
#endif
}
delete appCtx->get_mesh();
delete appCtx;
delete enuts;
}else */
if(ts_mode==1)
{
//UTS
//nlsm::NLSMCtx * appCtx = new nlsm::NLSMCtx(mesh);
nlsm::NLSMCtxGPU * appCtx = new nlsm::NLSMCtxGPU(mesh);
ts::ETS<DendroScalar,nlsm::NLSMCtxGPU>* ets = new ts::ETS<DendroScalar,nlsm::NLSMCtxGPU>(appCtx);
ets->set_evolve_vars(appCtx->get_evolution_vars());
ets->set_ets_coefficients(tsType);
for(ets->init(); ets->curr_time() < nlsm::NLSM_RK45_TIME_END ; ets->evolve())
{
const DendroIntL step = ets->curr_step();
const DendroScalar time = ets->curr_time();
const bool isActive = ets->is_active();
const unsigned int rank_global = ets->get_global_rank();
if( (step % nlsm::NLSM_REMESH_TEST_FREQ) == 0 )
{
appCtx->device_to_host_sync();
bool isRemesh = appCtx->is_remesh();
appCtx->terminal_output();
if(isRemesh)
{
if(!rank_global)
std::cout<<"[ETS] : Remesh is triggered. \n";
appCtx->remesh_and_gridtransfer(nlsm::NLSM_DENDRO_GRAIN_SZ, nlsm::NLSM_LOAD_IMB_TOL,nlsm::NLSM_SPLIT_FIX);
ets->sync_with_mesh();
}
if((step % nlsm::NLSM_IO_OUTPUT_FREQ) == 0 )
appCtx -> write_vtu();
if( (step % nlsm::NLSM_CHECKPT_FREQ) == 0 )
appCtx -> write_checkpt();
}
//appCtx_ets->dump_pt(std::cout);
//appCtx_enuts->dump_pt(std::cout);
//ets->dump_pt(std::cout);
//enuts->dump_pt(std::cout);
#ifdef __PROFILE_ETS__
char fName[200];
std::ofstream f_ets, f_enuts;
sprintf(fName,"%s_ets.prof",nlsm::NLSM_PROFILE_FILE_PREFIX.c_str());
if(!rank)
{
f_ets.open (fName,std::fstream::app);
if(f_ets.fail()) {std::cout<<fName<<" file open failed "<<std::endl; MPI_Abort(comm,0);}
}
ets->dump_pt(f_ets);
ets->reset_pt();
if(!rank) f_ets.close();
if(!rank) f_enuts.close();
#endif
}
delete appCtx->get_mesh();
delete appCtx;
delete ets;
}
MPI_Finalize();
return 0;
}
|
ebbe28fc0a96ebb25507fe90291e808419bd98c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernels_hip.cuh"
#include "mosaic.cuh"
extern unsigned int c;
unsigned int block_size = 0;
/*
* When the average function has finished, call this function to reduce the sum
* to get the total average on the CPU.
*
* h_average_colour - The vector to reduce the block level results from.
*/
void CUDART_CB CallbackReduce(void *h_average_colour) {
uchar4 *h_data = (uchar4 *)h_average_colour;
unsigned int n = block_size;
long3 average = make_long3(0, 0, 0);
// Reduce the block level results on CPU
for (unsigned int i = 0; i < n; i++) {
// Load a single value and add to the total average.
uchar4 av = h_data[i];
average.x += (long)(av.x);
average.y += (long)(av.y);
average.z += (long)(av.z);
}
// Divide and round the totals to the closest integer to give the average.
average.x = div_round(average.x, n);
average.y = div_round(average.y, n);
average.z = div_round(average.z, n);
// Output the average colour value for the image
fprintf(stderr, "CUDA Average image colour red = %d, green = %d, blue = %d \n", average.x, average.y, average.z);
}
/*
* The main CUDA function which calls the kernels to do the mosaic and average
* functionality.
*
* image - The ppm struct of the image we are performing on.
* iteration - Which iteration of the code to use.
*
* returns - The new 'mosaic' ppm image.
*/
__host__ ppm *perform_CUDA(ppm *image, int iteration) {
unsigned int image_size = (image->width * image->height);
// Device declarations
uchar4 *d_rgb, *d_rgb_output, *d_average_colour;
uint4 *d_average;
// Create the streams that will be used in iterations 2,3
hipStream_t stream1, stream2, stream3;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
// Allocate memory on the CPU for the image
ppm *h_image = (ppm *)malloc(sizeof(ppm));
uchar4 *h_rgb = (uchar4 *)malloc(image_size * sizeof(uchar4));
uchar4 *h_average_colour = (uchar4 *)malloc(image_size * sizeof(uchar4));
uint4 *h_average = (uint4 *)malloc(sizeof(uint4));
// Cuda timing creation
hipEvent_t start, stop, execution, execution_stop;
float ms, ems;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&execution);
hipEventCreate(&execution_stop);
// Start timing
hipEventRecord(start, 0);
// Cuda layout for mosaic
// As max threads per block can be 1024 (32,32), if c is higher then default to this value.
int tpb = fmin(c, 32);
dim3 mosBlocksPerGrid((int)ceil((double)image->width / c), (int)ceil((double)image->height / c));
dim3 mosThreadsPerBlock(tpb, tpb);
// Work out the amount of work needed if c is greater than 32.
int work = c <= 32 ? 1 : (c * c) / (32 * 32);
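// A block is capped at 32x32 threads, so a c x c tile larger than that is expected to be split into 'work' pixels per thread inside the kernels.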
block_size = mosBlocksPerGrid.x * mosBlocksPerGrid.y;
// Allocate memory on the GPU for the image
hipMalloc((void **)&d_rgb_output, image_size * sizeof(uchar4));
hipMalloc((void **)&d_rgb, image_size * sizeof(uchar4));
hipMalloc((void **)&d_average_colour, block_size * sizeof(uchar4));
hipMalloc((void **)&d_average, sizeof(uint4));
checkCUDAError("CUDA malloc");
// Copy the rgb values of the image to device memory
hipMemcpyAsync(d_rgb, image->rgb, image_size * sizeof(uchar4), hipMemcpyHostToDevice, stream1);
checkCUDAError("CUDA memcpy to device");
// Perform the mosaic filter
unsigned int sm_size = sizeof(uint4) * mosThreadsPerBlock.x * mosThreadsPerBlock.y;
if (iteration == 1) {
// 1D Texture Bind
hipBindTexture(0, ppmTexture1D, d_rgb, image_size * sizeof(uchar4));
checkCUDAError("ppmTexture1D bind");
// Block
average_CUDA_block << <mosBlocksPerGrid, mosThreadsPerBlock, sm_size >> > (d_rgb, d_average_colour, image->width, work);
checkCUDAError("mosaic block it1");
mosaic_CUDA_tile << <mosBlocksPerGrid, mosThreadsPerBlock >> > (d_average_colour, d_rgb_output, image->width, work);
checkCUDAError("mosaic tile it1");
}
else if (iteration == 2) {
// Warp and reduce
hipEventRecord(execution, 0);
average_CUDA_warp << <mosBlocksPerGrid, mosThreadsPerBlock >> > (d_rgb, d_average_colour, image->width, work, tpb);
checkCUDAError("mosaic warp it2");
hipDeviceSynchronize();
dim3 avThreadsPerBlock(1, 1);
reduce << < mosBlocksPerGrid, avThreadsPerBlock, 0, stream1 >> > (d_average_colour, d_average);
mosaic_CUDA_tile << <mosBlocksPerGrid, mosThreadsPerBlock, 0, stream2 >> > (d_average_colour, d_rgb_output, image->width, work);
checkCUDAError("mosaic tile it2");
}
else if (iteration == 3) {
// Block and reduce
hipEventRecord(execution, 0);
average_CUDA_block_final << <mosBlocksPerGrid, mosThreadsPerBlock, sm_size >> > (d_rgb, d_average_colour, image->width, work);
checkCUDAError("mosaic block it3");
hipDeviceSynchronize();
mosaic_CUDA_tile_final << <mosBlocksPerGrid, mosThreadsPerBlock >> > (d_average_colour, d_rgb_output, image->width, work, d_average);
checkCUDAError("mosaic tile it3");
}
// Sync
hipDeviceSynchronize();
if (iteration != 1) {
hipEventRecord(execution_stop, 0);
hipEventSynchronize(execution_stop);
hipEventElapsedTime(&ems, execution, execution_stop);
printf("CUDA mode execution time took %f ms\n", ems);
}
// Copy the image back from the GPU
hipMemcpyAsync(h_rgb, d_rgb_output, image_size * sizeof(uchar4), hipMemcpyDeviceToHost, stream1);
hipMemcpyAsync(h_average_colour, d_average_colour, block_size * sizeof(uchar4), hipMemcpyDeviceToHost, stream2);
hipMemcpyAsync(h_average, d_average, sizeof(uint4), hipMemcpyDeviceToHost, stream3);
checkCUDAError("CUDA memcpy from device");
// Reduce the final values on the CPU
if(iteration == 1)
hipLaunchHostFunc(stream2, CallbackReduce, (void *)h_average_colour); // stream2, so the callback runs only after the async copy of h_average_colour (issued on stream2) completes
else
fprintf(stderr, "CUDA Average image colour red = %d, green = %d, blue = %d \n", div_round(h_average[0].x, block_size), div_round(h_average[0].y, block_size), div_round(h_average[0].z, block_size));
// End Timing
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
//output timings
printf("CUDA mode total time took %f ms\n", ms);
// Free device memory
hipFree(d_rgb);
hipFree(d_rgb_output);
hipFree(d_average_colour);
hipFree(d_average);
hipUnbindTexture(ppmTexture1D);
hipEventDestroy(start);
hipEventDestroy(stop);
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
checkCUDAError("Cuda free and destroy");
// Put data back into a host ppm struct
h_image->height = image->height;
h_image->width = image->width;
h_image->rgb = h_rgb;
// Free host memory
free(h_average_colour);
free(h_average);
return h_image;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// A simple function that divides two ints and rounds to the nearest int.
unsigned int div_round(unsigned int a, unsigned int b) {
return (a + (b / 2)) / b;
} | ebbe28fc0a96ebb25507fe90291e808419bd98c1.cu | #include "kernels.cuh"
#include "mosaic.cuh"
extern unsigned int c;
unsigned int block_size = 0;
/*
* When the average function has finished, call this function to reduce the sum
* to get the total average on the CPU.
*
* h_average_colour - The vector to reduce the block level results from.
*/
void CUDART_CB CallbackReduce(void *h_average_colour) {
uchar4 *h_data = (uchar4 *)h_average_colour;
unsigned int n = block_size;
long3 average = make_long3(0, 0, 0);
// Reduce the block level results on CPU
for (unsigned int i = 0; i < n; i++) {
// Load a single value and add to the total average.
uchar4 av = h_data[i];
average.x += (long)(av.x);
average.y += (long)(av.y);
average.z += (long)(av.z);
}
// Divide and round the totals to the closest integer to give the average.
average.x = div_round(average.x, n);
average.y = div_round(average.y, n);
average.z = div_round(average.z, n);
// Output the average colour value for the image
fprintf(stderr, "CUDA Average image colour red = %d, green = %d, blue = %d \n", average.x, average.y, average.z);
}
/*
* The main CUDA function which calls the kernels to do the mosaic and average
* functionality.
*
* image - The ppm struct of the image we are performing on.
* iteration - Which iteration of the code to use.
*
* returns - The new 'mosaic' ppm image.
*/
__host__ ppm *perform_CUDA(ppm *image, int iteration) {
unsigned int image_size = (image->width * image->height);
// Device declarations
uchar4 *d_rgb, *d_rgb_output, *d_average_colour;
uint4 *d_average;
// Create the streams that will be used in iterations 2,3
cudaStream_t stream1, stream2, stream3;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
// Allocate memory on the CPU for the image
ppm *h_image = (ppm *)malloc(sizeof(ppm));
uchar4 *h_rgb = (uchar4 *)malloc(image_size * sizeof(uchar4));
uchar4 *h_average_colour = (uchar4 *)malloc(image_size * sizeof(uchar4));
uint4 *h_average = (uint4 *)malloc(sizeof(uint4));
// Cuda timing creation
cudaEvent_t start, stop, execution, execution_stop;
float ms, ems;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&execution);
cudaEventCreate(&execution_stop);
// Start timing
cudaEventRecord(start, 0);
// Cuda layout for mosaic
// As max threads per block can be 1024 (32,32), if c is higher then default to this value.
int tpb = fmin(c, 32);
dim3 mosBlocksPerGrid((int)ceil((double)image->width / c), (int)ceil((double)image->height / c));
dim3 mosThreadsPerBlock(tpb, tpb);
// Work out the amount of work needed if c is greater than 32.
int work = c <= 32 ? 1 : (c * c) / (32 * 32);
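// A block is capped at 32x32 threads, so a c x c tile larger than that is expected to be split into 'work' pixels per thread inside the kernels.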
block_size = mosBlocksPerGrid.x * mosBlocksPerGrid.y;
// Allocate memory on the GPU for the image
cudaMalloc((void **)&d_rgb_output, image_size * sizeof(uchar4));
cudaMalloc((void **)&d_rgb, image_size * sizeof(uchar4));
cudaMalloc((void **)&d_average_colour, block_size * sizeof(uchar4));
cudaMalloc((void **)&d_average, sizeof(uint4));
checkCUDAError("CUDA malloc");
// Copy the rgb values of the image to device memory
cudaMemcpyAsync(d_rgb, image->rgb, image_size * sizeof(uchar4), cudaMemcpyHostToDevice, stream1);
checkCUDAError("CUDA memcpy to device");
// Perform the mosaic filter
unsigned int sm_size = sizeof(uint4) * mosThreadsPerBlock.x * mosThreadsPerBlock.y;
if (iteration == 1) {
// 1D Texture Bind
cudaBindTexture(0, ppmTexture1D, d_rgb, image_size * sizeof(uchar4));
checkCUDAError("ppmTexture1D bind");
// Block
average_CUDA_block << <mosBlocksPerGrid, mosThreadsPerBlock, sm_size >> > (d_rgb, d_average_colour, image->width, work);
checkCUDAError("mosaic block it1");
mosaic_CUDA_tile << <mosBlocksPerGrid, mosThreadsPerBlock >> > (d_average_colour, d_rgb_output, image->width, work);
checkCUDAError("mosaic tile it1");
}
else if (iteration == 2) {
// Warp and reduce
cudaEventRecord(execution, 0);
average_CUDA_warp << <mosBlocksPerGrid, mosThreadsPerBlock >> > (d_rgb, d_average_colour, image->width, work, tpb);
checkCUDAError("mosaic warp it2");
cudaDeviceSynchronize();
dim3 avThreadsPerBlock(1, 1);
reduce << < mosBlocksPerGrid, avThreadsPerBlock, 0, stream1 >> > (d_average_colour, d_average);
mosaic_CUDA_tile << <mosBlocksPerGrid, mosThreadsPerBlock, 0, stream2 >> > (d_average_colour, d_rgb_output, image->width, work);
checkCUDAError("mosaic tile it2");
}
else if (iteration == 3) {
// Block and reduce
cudaEventRecord(execution, 0);
average_CUDA_block_final << <mosBlocksPerGrid, mosThreadsPerBlock, sm_size >> > (d_rgb, d_average_colour, image->width, work);
checkCUDAError("mosaic block it3");
cudaDeviceSynchronize();
mosaic_CUDA_tile_final << <mosBlocksPerGrid, mosThreadsPerBlock >> > (d_average_colour, d_rgb_output, image->width, work, d_average);
checkCUDAError("mosaic tile it3");
}
// Sync
cudaDeviceSynchronize();
if (iteration != 1) {
cudaEventRecord(execution_stop, 0);
cudaEventSynchronize(execution_stop);
cudaEventElapsedTime(&ems, execution, execution_stop);
printf("CUDA mode execution time took %f ms\n", ems);
}
// Copy the image back from the GPU
cudaMemcpyAsync(h_rgb, d_rgb_output, image_size * sizeof(uchar4), cudaMemcpyDeviceToHost, stream1);
cudaMemcpyAsync(h_average_colour, d_average_colour, block_size * sizeof(uchar4), cudaMemcpyDeviceToHost, stream2);
cudaMemcpyAsync(h_average, d_average, sizeof(uint4), cudaMemcpyDeviceToHost, stream3);
checkCUDAError("CUDA memcpy from device");
// Reduce the final values on the CPU
if(iteration == 1)
cudaLaunchHostFunc(stream2, CallbackReduce, (void *)h_average_colour); // stream2, so the callback runs only after the async copy of h_average_colour (issued on stream2) completes
else
fprintf(stderr, "CUDA Average image colour red = %d, green = %d, blue = %d \n", div_round(h_average[0].x, block_size), div_round(h_average[0].y, block_size), div_round(h_average[0].z, block_size));
// End Timing
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
//output timings
printf("CUDA mode total time took %f ms\n", ms);
// Free device memory
cudaFree(d_rgb);
cudaFree(d_rgb_output);
cudaFree(d_average_colour);
cudaFree(d_average);
cudaUnbindTexture(ppmTexture1D);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
checkCUDAError("Cuda free and destroy");
// Put data back into a host ppm struct
h_image->height = image->height;
h_image->width = image->width;
h_image->rgb = h_rgb;
// Free host memory
free(h_average_colour);
free(h_average);
return h_image;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// A simple function that divides two ints and rounds to the nearest int.
unsigned int div_round(unsigned int a, unsigned int b) {
return (a + (b / 2)) / b;
} |
bc8d842df1d5c1a28721658f77c99c710cec55d3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define ARRAY_SIZE 8
#define BLOCK_SIZE 16
__global__ void preSum(int *g_num, int *ans,int size)
{
int tx = threadIdx.x;
int pOut = 0,pIn = 1;
__shared__ int s_num[2][BLOCK_SIZE];
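// Hillis-Steele inclusive scan: the two shared-memory buffers are ping-ponged each pass,
// with pOut indexing the buffer being written and pIn the one being read.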
// Guard the global load: the block launches BLOCK_SIZE threads but the array holds only 'size' elements.
s_num[0][tx] = (tx < size) ? g_num[tx] : 0;
s_num[1][tx] = s_num[0][tx];
__syncthreads();
for(int offset = 1; offset < size; offset *= 2)
{
pOut = 1 - pOut;
pIn = 1 - pOut;
if(tx >= offset)
s_num[pOut][tx] = s_num[pIn][tx] + s_num[pIn][tx - offset];
else
s_num[pOut][tx] = s_num[pIn][tx];
__syncthreads();
}
if(tx < size)
ans[tx] = s_num[pOut][tx];
}
int main()
{
int num[8] = {3,1,7,0,4,1,6,3};
int *h_num = (int*)malloc(ARRAY_SIZE * sizeof(int));
int *h_ans = (int*)malloc(ARRAY_SIZE * sizeof(int));
int *d_num, *d_ans;
hipMalloc((void **)&d_num,sizeof(int) * ARRAY_SIZE);
hipMalloc((void **)&d_ans,sizeof(int) * ARRAY_SIZE);
for(int i = 0; i < ARRAY_SIZE; ++i)
{
h_num[i] = num[i];
printf("%d\n",h_num[i]);
}
hipMemcpy(d_num,h_num,sizeof(int) * ARRAY_SIZE,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( preSum), dim3(1),dim3(BLOCK_SIZE), 0, 0, d_num,d_ans,ARRAY_SIZE);
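// Optional, illustrative check using plain HIP runtime calls: surface any launch error before copying results back.
hipError_t launch_err = hipGetLastError();
if(launch_err != hipSuccess)
printf("preSum launch failed: %s\n", hipGetErrorString(launch_err));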
hipMemcpy(h_ans,d_ans,sizeof(int) * ARRAY_SIZE,hipMemcpyDeviceToHost);
for(int i = 0; i < ARRAY_SIZE; ++i)
printf("i %d %d\n",i,h_ans[i]);
printf("\n");
// free(h_num);
// free(h_ans);
// hipFree(d_num);
// hipFree(d_ans);
return 0;
}
| bc8d842df1d5c1a28721658f77c99c710cec55d3.cu | #include <stdio.h>
#include <cuda_runtime.h>
#define ARRAY_SIZE 8
#define BLOCK_SIZE 16
__global__ void preSum(int *g_num, int *ans,int size)
{
int tx = threadIdx.x;
int pOut = 0,pIn = 1;
__shared__ int s_num[2][BLOCK_SIZE];
s_num[0][tx] = g_num[tx];
s_num[1][tx] = s_num[0][tx];
__syncthreads();
for(int offset = 1; offset < size; offset *= 2)
{
pOut = 1 - pOut;
pIn = 1 - pOut;
if(tx >= offset)
s_num[pOut][tx] = s_num[pIn][tx] + s_num[pIn][tx - offset];
else
s_num[pOut][tx] = s_num[pIn][tx];
__syncthreads();
}
ans[tx] = s_num[pOut][tx];
}
int main()
{
int num[8] = {3,1,7,0,4,1,6,3};
int *h_num = (int*)malloc(ARRAY_SIZE);
int *h_ans = (int*)malloc(ARRAY_SIZE);
int *d_num, *d_ans;
cudaMalloc((void **)&d_num,sizeof(int) * ARRAY_SIZE);
cudaMalloc((void **)&d_ans,sizeof(int) * ARRAY_SIZE);
for(int i = 0; i < ARRAY_SIZE; ++i)
h_num[i] = num[i];
printf("%d\n",h_num[i]);
cudaMemcpy(d_num,h_num,sizeof(int) * ARRAY_SIZE,cudaMemcpyHostToDevice);
preSum<<<1,BLOCK_SIZE>>>(d_num,d_ans,ARRAY_SIZE);
cudaMemcpy(h_ans,d_ans,sizeof(int) * ARRAY_SIZE,cudaMemcpyDeviceToHost);
for(int i = 0; i < ARRAY_SIZE; ++i)
printf("i %d %d\n",i,h_ans[i]);
printf("\n");
// free(h_num);
// free(h_ans);
// cudaFree(d_num);
// cudaFree(d_ans);
return 0;
}
|
c1071af9edc88a4fc284be625a17b82a1282da0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "GOL_CUDA.h"
#include <hip/hip_runtime_api.h>
#include "lcutil.h"
#include "timestamp.h"
__global__ void GOL_Simulation(int rows, int columns, int plainSize, char *prevPlain, char *currPlain)
{
const unsigned int thread_pos = blockIdx.x * blockDim.x + threadIdx.x;
int totalNeighbours, upperNeighbours, sideNeighbours, lowerNeighbours;
//total number of pixels up to the row over the current point
//and the number of pixels up to the current row
//simplifying the expressions for neighbouring positions
int upperPlainPixels, pixelsUpToRow;
//the y coordinates on the left and right of the current point
//keeping in mind our plain is interconnected
int leftY, rightY;
//coordinates of current point
int currX, currY;
//taking upper bound number of blocks
//some threads will be obsolete
if(thread_pos < plainSize)
{
currX = thread_pos / columns;
currY = thread_pos - currX * columns;
leftY = (currY + columns - 1) % columns;
rightY = (currY + 1) % columns;
upperPlainPixels = columns * ((currX + rows - 1) % rows);
pixelsUpToRow = columns * ((currX + 1) % rows);
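// The modular arithmetic above implements periodic (toroidal) boundaries: cells on an edge wrap around to the opposite side.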
//calculating the neighbours
upperNeighbours = prevPlain[upperPlainPixels + leftY] + prevPlain[upperPlainPixels + currY] + prevPlain[upperPlainPixels + rightY];
sideNeighbours = prevPlain[currX * columns + leftY] + prevPlain[currX * columns + rightY];
lowerNeighbours = prevPlain[pixelsUpToRow + leftY] + prevPlain[pixelsUpToRow + currY] + prevPlain[pixelsUpToRow + rightY];
//add the three groups of neighbours
totalNeighbours = upperNeighbours + sideNeighbours + lowerNeighbours;
//pixel survives
if(totalNeighbours == 3 || (totalNeighbours == 2 && prevPlain[thread_pos] == 1))
{
currPlain[thread_pos] = 1;
}else//pixel dies
{
currPlain[thread_pos] = 0;
}
}
}
//function used to print error messages for cuda errors
int checkFunction(hipError_t functionOutput, const char *msg)
{
if (functionOutput != hipSuccess)
{
fprintf(stderr, "%s: %s\n", msg, hipGetErrorString(functionOutput)); // report the HIP error string rather than errno
exit(functionOutput);
}
return 0;
}
extern "C" float GPU_GOL(int rows, int columns, int iterations, char **prevPlain, char **currPlain)
{
int totalPixels = rows * columns;
int plainMemory = totalPixels * sizeof(char);
int iteration;
double microseconds;
//plains stored in gpu memory
char *d_prevPlain, *d_currPlain, *d_tmpPlain;
//block and threads variables for CUDA code
const int THREADS_PER_BLOCK = 1024;
dim3 blockNum(THREADS_PER_BLOCK);
dim3 threadNum(FRACTION_CEILING(totalPixels, THREADS_PER_BLOCK));
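// Note: threadNum holds the grid dimension (number of blocks) and blockNum the threads per block;
// the launch below passes them in that (grid, block) order.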
//allocating memory for the GPU plains and copying data from CPU to GPU
checkFunction(hipMalloc((void**) &d_prevPlain, plainMemory), "Failed to allocate memory for GPU previous plain");
checkFunction(hipMemcpy(d_prevPlain, &(prevPlain[0][0]), plainMemory, hipMemcpyHostToDevice), "Failed to copy plain from CPU to GPU");
checkFunction(hipMalloc((void**) &d_currPlain, plainMemory), "Failed to allocate memory for GPU previous plain");
timestamp t_start;
t_start = getTimestamp();
//actual simulation execution
for(iteration = 0; iteration < iterations; iteration++)
{
hipLaunchKernelGGL(( GOL_Simulation), dim3(threadNum), dim3(blockNum), 0, 0, rows, columns, totalPixels, d_prevPlain, d_currPlain);
//checking whether an error occured during current GOL simulation's iteration
checkFunction(hipGetLastError(), "Error occured during GPU execution");
//current plain is the previous updated one
d_tmpPlain = d_prevPlain;
d_prevPlain = d_currPlain;
d_currPlain = d_tmpPlain;
}
hipDeviceSynchronize();
microseconds = getElapsedtime(t_start);
//copy the final plain from gpu to cpu memory
checkFunction(hipMemcpy(&(currPlain[0][0]), d_currPlain, plainMemory, hipMemcpyDeviceToHost), "Failed to copy plain from GPU to CPU");
//freeing the memory allocated in the gpu
checkFunction(hipFree(d_prevPlain), "Failed to free GPU previous plain");
checkFunction(hipFree(d_currPlain), "Failed to free GPU current plain");
return microseconds;
} | c1071af9edc88a4fc284be625a17b82a1282da0e.cu | #include "GOL_CUDA.h"
#include <cuda_runtime_api.h>
#include "lcutil.h"
#include "timestamp.h"
__global__ void GOL_Simulation(int rows, int columns, int plainSize, char *prevPlain, char *currPlain)
{
const unsigned int thread_pos = blockIdx.x * blockDim.x + threadIdx.x;
int totalNeighbours, upperNeighbours, sideNeighbours, lowerNeighbours;
//total number of pixels up to the row over the current point
//and the number of pixels up to the current row
//simplifying the expressions for neighbouring positions
int upperPlainPixels, pixelsUpToRow;
//the y coordinates on the left and right of the current point
//keeping in mind our plain is interconnected
int leftY, rightY;
//coordinates of current point
int currX, currY;
//taking upper bound number of blocks
//some threads will be obsolete
if(thread_pos < plainSize)
{
currX = thread_pos / columns;
currY = thread_pos - currX * columns;
leftY = (currY + columns - 1) % columns;
rightY = (currY + 1) % columns;
upperPlainPixels = columns * ((currX + rows - 1) % rows);
pixelsUpToRow = columns * ((currX + 1) % rows);
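// The modular arithmetic above implements periodic (toroidal) boundaries: cells on an edge wrap around to the opposite side.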
//calculating the neighbours
upperNeighbours = prevPlain[upperPlainPixels + leftY] + prevPlain[upperPlainPixels + currY] + prevPlain[upperPlainPixels + rightY];
sideNeighbours = prevPlain[currX * columns + leftY] + prevPlain[currX * columns + rightY];
lowerNeighbours = prevPlain[pixelsUpToRow + leftY] + prevPlain[pixelsUpToRow + currY] + prevPlain[pixelsUpToRow + rightY];
//add the three groups of neighbours
totalNeighbours = upperNeighbours + sideNeighbours + lowerNeighbours;
//pixel survives
if(totalNeighbours == 3 || (totalNeighbours == 2 && prevPlain[thread_pos] == 1))
{
currPlain[thread_pos] = 1;
}else//pixel dies
{
currPlain[thread_pos] = 0;
}
}
}
//function used to print error messages for cuda errors
int checkFunction(cudaError_t functionOutput, const char *msg)
{
if (functionOutput != cudaSuccess)
{
fprintf(stderr, "%s: %s\n", msg, cudaGetErrorString(functionOutput)); // report the CUDA error string rather than errno
exit(functionOutput);
}
return 0;
}
extern "C" float GPU_GOL(int rows, int columns, int iterations, char **prevPlain, char **currPlain)
{
int totalPixels = rows * columns;
int plainMemory = totalPixels * sizeof(char);
int iteration;
double microseconds;
//plains stored in gpu memory
char *d_prevPlain, *d_currPlain, *d_tmpPlain;
//block and threads variables for CUDA code
const int THREADS_PER_BLOCK = 1024;
dim3 blockNum(THREADS_PER_BLOCK);
dim3 threadNum(FRACTION_CEILING(totalPixels, THREADS_PER_BLOCK));
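// Note: threadNum holds the grid dimension (number of blocks) and blockNum the threads per block;
// the launch below passes them in that (grid, block) order.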
//allocating memory for the GPU plains and copying data from CPU to GPU
checkFunction(cudaMalloc((void**) &d_prevPlain, plainMemory), "Failed to allocate memory for GPU previous plain");
checkFunction(cudaMemcpy(d_prevPlain, &(prevPlain[0][0]), plainMemory, cudaMemcpyHostToDevice), "Failed to copy plain from CPU to GPU");
checkFunction(cudaMalloc((void**) &d_currPlain, plainMemory), "Failed to allocate memory for GPU previous plain");
timestamp t_start;
t_start = getTimestamp();
//actual simulation execution
for(iteration = 0; iteration < iterations; iteration++)
{
GOL_Simulation<<<threadNum, blockNum>>>(rows, columns, totalPixels, d_prevPlain, d_currPlain);
//checking whether an error occured during current GOL simulation's iteration
checkFunction(cudaGetLastError(), "Error occured during GPU execution");
//current plain is the previous updated one
d_tmpPlain = d_prevPlain;
d_prevPlain = d_currPlain;
d_currPlain = d_tmpPlain;
}
cudaDeviceSynchronize();
microseconds = getElapsedtime(t_start);
//copy the final plain from gpu to cpu memory
checkFunction(cudaMemcpy(&(currPlain[0][0]), d_currPlain, plainMemory, cudaMemcpyDeviceToHost), "Failed to copy plain from GPU to CPU");
//freeing the memory allocated in the gpu
checkFunction(cudaFree(d_prevPlain), "Failed to free GPU previous plain");
checkFunction(cudaFree(d_currPlain), "Failed to free GPU current plain");
return microseconds;
} |
f13a0a5870af970ee5c886cf287e1f8020b99882.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_2.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells to solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6775309540028,0.00126031074107193,0.782379594133090,0.782216749001106,0.000172068343086772,0.486227463562957,0.00291750746806204,0.999998383839518,1.89860165324306e-08,1.86371442934849e-05,0.999771183306077,1.00730952275387,0.999997729764813,4.01181567168462e-05,0.661435383223664,9.89216406636310,139.601234209998};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={13.9645635317638,0.000234559273515713,0.000158508496150117,0.000387718953473422,0.271550011299244,0.171313643894679,0.148132634408518,3.52429749186627,0.0163232963007063,1.80625170161156,1099.99984094905,0.000508428591582056,0.426315288126368,0.0193610246251599,0.00342305438925442,2.79133840240607e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
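// Gate variables use the exponential (Rush-Larsen-style) update gate = INF - (INF - gate)*exp(-dt/TAU),
// which integrates each locally linear gate ODE exactly over the step.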
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| f13a0a5870af970ee5c886cf287e1f8020b99882.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S1_2.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells to solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6775309540028,0.00126031074107193,0.782379594133090,0.782216749001106,0.000172068343086772,0.486227463562957,0.00291750746806204,0.999998383839518,1.89860165324306e-08,1.86371442934849e-05,0.999771183306077,1.00730952275387,0.999997729764813,4.01181567168462e-05,0.661435383223664,9.89216406636310,139.601234209998};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
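// Note: rDY already holds the fully updated state (RHS_gpu applies explicit Euler to V and the
// concentrations, and Rush-Larsen style updates to the gates), so the copy-back loop below starts
// at i = 0 and its first iteration overwrites the voltage update made on the line above.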
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={13.9645635317638,0.000234559273515713,0.000158508496150117,0.000387718953473422,0.271550011299244,0.171313643894679,0.148132634408518,3.52429749186627,0.0163232963007063,1.80625170161156,1099.99984094905,0.000508428591582056,0.426315288126368,0.0193610246251599,0.00342305438925442,2.79133840240607e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
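// Precomputed exponential decay factors reused below for the Rush-Larsen style updates of the
// fCa and g gates (see the sfca/sg updates near the end of this function).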
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
7bda1b9f66f4c3a400911c21eb339fd1dbc5aa4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "encoder.h"
#include "kernels/transformerKernels.h"
/**
@file
Transformer encoder, composed of the GEMM library and
custom CUDA kernel functions
*/
namespace lightseq {
namespace cuda {
template <OperationType OpType_>
Encoder<OpType_>::Encoder(int max_batch_size, const int *p_d_token_id,
int *p_d_padding_mask, _DataType *p_d_output,
const TransformerWeight<OpType_> &tw,
hipStream_t stream, hipblasHandle_t hd)
: _max_batch_size(max_batch_size),
_p_d_token_id(p_d_token_id),
_p_d_padding_mask(p_d_padding_mask),
_p_d_output(p_d_output),
_tw(tw),
_stream(stream),
_hd(hd),
_p_d_src_emb_wei(tw.get_src_emb_wei()),
_p_d_enc_wei(tw.get_enc_wei()),
_fone((_DataType)1.f),
_fzero((_DataType)0.f),
_atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)),
_max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size),
_max_thread_per_block(1024) {}
/**
Compute the GPU memory size needed by the transformer encoder;
to see how this memory is used, check init_buffer() for details
*/
template <OperationType OpType_>
long Encoder<OpType_>::compute_buffer_bytesize() {
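// sz1 covers the self-attention workspace: the fused QKV projection (3 blocks of batch*step*hidden)
// plus Q, K and V (3 more blocks) and the attention-score matrix (batch*heads*step*step);
// sz2 covers the two FFN buffers. The two phases reuse the same memory, so only the max is needed.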
long sz1 = _max_batch_dim * 6 +
_max_batch_size * _tw._head_num * _tw._max_step * _tw._max_step;
long sz2 = _max_batch_dim + _max_batch_size * _tw._max_step * _tw._inner_size;
return max(sz1, sz2) * sizeof(_DataType);
}
/**
Init the GPU memory pointer which point to
the memory buffer needed by encoder.
These buffer are used during custom cuda kernel function,
find the corresponding function to see how these buffer are used
*/
template <OperationType OpType_>
void Encoder<OpType_>::init_buffer(void *pbuf) {
_DataType *p_d_buf = reinterpret_cast<_DataType *>(pbuf);
_p_d_qkv_projected = p_d_buf;
_p_d_q = _p_d_qkv_projected + _max_batch_dim * 3;
_p_d_k = _p_d_q + _max_batch_dim;
_p_d_v = _p_d_k + _max_batch_dim;
_p_d_c = _p_d_v + _max_batch_dim;
_p_d_ffn_buf1 = p_d_buf;
_p_d_ffn_buf2 = _p_d_ffn_buf1 + _max_batch_dim;
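// The FFN buffers alias the start of the same allocation used by the attention buffers above;
// this is safe because self_attention() has written its result into _p_d_output before
// ffn_add_norm() runs.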
return;
}
/**
Some requirements imposed by the custom CUDA kernel functions
*/
template <OperationType OpType_>
std::string Encoder<OpType_>::check() {
// if (_max_thread_per_block < _tw._hidden_size) {
// return "violate hidden_size <= max_thread_per_block";
// }
if (_tw._inner_size & 1) {
return "violate inner_size % 2 = 0";
}
if (_tw._dim_per_head & 1) {
return "violate dim_per_head % 2 = 0";
}
if (_p_d_src_emb_wei.size() != 4) {
return "violate p_d_src_emb_wei.size() = 4";
}
if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) {
return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer";
}
return "";
}
/**
Encoder inference
*/
template <OperationType OpType_>
void Encoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) {
/* ---step1. init--- */
_batch_size = batch_size;
_batch_seq_len = batch_seq_len;
_batch_token_num = batch_size * batch_seq_len;
#ifdef DEBUG_RESULT
std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len
<< std::endl;
print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len);
#endif
/* ---step2. encoder feedforward--- */
ker_enc_embedding_launcher<_DataType>(
batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0],
_p_d_src_emb_wei[1], _p_d_token_id, _p_d_output, _p_d_padding_mask,
_tw._padding_id, _max_thread_per_block);
#ifdef DEBUG_RESULT
print_vec(_p_d_output, "encoder embedding(head):", 5);
print_vec(_p_d_output + _batch_token_num * _tw._hidden_size - 5,
"encoder embedding(tail):", 5);
#endif
for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) {
_weight_offset = _layer_id * _tw._weight_per_enc_layer;
self_attention();
ffn_add_norm();
}
// last layer norm
ker_norm_layer_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_output,
_p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block);
#ifdef DEBUG_RESULT
for (int i = 0; i < _batch_size; i++) { // batch_id
for (int j = 0; j < _batch_seq_len; j++) { // token_id
std::cout << "encoder output: token-" << j << std::endl;
print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size +
j * _tw._hidden_size,
"encoder_output", _tw._dim_per_head);
}
} // not normal
#endif
return;
}
/**
Encoder self attention
*/
template <OperationType OpType_>
void Encoder<OpType_>::self_attention() {
/* ---step 0. layer_norm, add output_bias to "query"--- */
ker_norm_layer_resual_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_q,
_p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1],
_p_d_enc_wei[_weight_offset + 5], _max_thread_per_block, _tw._is_post_ln);
/* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head
* gemm--- */
CHECK_GPU_ERROR(hipblasGemmEx(
_hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size * 3, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType,
_tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero,
_p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// get q, k, v by split and reshape qkv
ker_arrange_encself_qkv_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected,
_p_d_enc_wei[_weight_offset + 3], _p_d_q, _max_batch_dim, _batch_seq_len,
_tw._dim_per_head, _tw._head_num, _max_thread_per_block);
/* ---step 2. correlation = q * k, perform softmax on correlation--- */
CHECK_GPU_ERROR(hipblasGemmStridedBatchedEx(
_hd, HIPBLAS_OP_T, HIPBLAS_OP_N, _batch_seq_len, _batch_seq_len,
_tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, &_fzero, _p_d_c, _CType,
_batch_seq_len, _batch_seq_len * _batch_seq_len,
_batch_size * _tw._head_num, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
ker_correlation_softmax_encself_launcher<_DataType>(
_batch_size, _batch_seq_len, _tw._head_num, _stream, _p_d_c,
_p_d_padding_mask);
/* ---step 3. new_q = correlation * v--- */
CHECK_GPU_ERROR(hipblasGemmStridedBatchedEx(
_hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._dim_per_head, _batch_seq_len,
_batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len,
_batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType,
_tw._dim_per_head, _batch_seq_len * _tw._dim_per_head,
_batch_size * _tw._head_num, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// use v to store the reshaped q, since they are the same size and v
// will not be used again before the next multi-head attention
ker_arrange_atten_output_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_q, _p_d_v,
_batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block);
/* ---step 4. new_q = ori_q + new_q * output_wei--- */
CHECK_GPU_ERROR(hipblasGemmEx(
_hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType,
_tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_output,
_CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
return;
}
template <OperationType OpType_>
void Encoder<OpType_>::ffn_add_norm() {
/* ---step 0. layer_norm, add output_bias to "query"--- */
ker_norm_layer_resual_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_ffn_buf1,
_p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7],
_p_d_enc_wei[_weight_offset + 11], _max_thread_per_block,
_tw._is_post_ln);
/* ---step 1. first ffn layer--- */
CHECK_GPU_ERROR(hipblasGemmEx(
_hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._inner_size, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType,
_tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero,
_p_d_ffn_buf2, _CType, _tw._inner_size, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
if (_tw._use_gelu) {
ker_bias_gelu_launcher<_DataType>(
_batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2,
_p_d_enc_wei[_weight_offset + 9], _tw._inner_size);
} else {
ker_bias_relu_launcher<_DataType>(
_batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2,
_p_d_enc_wei[_weight_offset + 9], _tw._inner_size);
}
/* ---step 2. second ffn layer--- */
CHECK_GPU_ERROR(hipblasGemmEx(
_hd, HIPBLAS_OP_N, HIPBLAS_OP_N, _tw._hidden_size, _batch_token_num,
_tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType,
_tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone,
_p_d_output, _CType, _tw._hidden_size, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
return;
}
template class Encoder<OperationType::FP16>;
template class Encoder<OperationType::FP32>;
} // namespace cuda
} // namespace lightseq
| 7bda1b9f66f4c3a400911c21eb339fd1dbc5aa4b.cu | #include "encoder.h"
#include "kernels/transformerKernels.h"
/**
@file
Transformer encoder, composed of the GEMM library and
custom CUDA kernel functions
*/
namespace lightseq {
namespace cuda {
template <OperationType OpType_>
Encoder<OpType_>::Encoder(int max_batch_size, const int *p_d_token_id,
int *p_d_padding_mask, _DataType *p_d_output,
const TransformerWeight<OpType_> &tw,
cudaStream_t stream, cublasHandle_t hd)
: _max_batch_size(max_batch_size),
_p_d_token_id(p_d_token_id),
_p_d_padding_mask(p_d_padding_mask),
_p_d_output(p_d_output),
_tw(tw),
_stream(stream),
_hd(hd),
_p_d_src_emb_wei(tw.get_src_emb_wei()),
_p_d_enc_wei(tw.get_enc_wei()),
_fone((_DataType)1.f),
_fzero((_DataType)0.f),
_atten_scaler((_DataType)sqrt(1.f / tw._dim_per_head)),
_max_batch_dim(max_batch_size * tw._max_step * tw._hidden_size),
_max_thread_per_block(1024) {}
/**
Compute the GPU memory size needed by the transformer encoder;
to see how this memory is used, check init_buffer() for details
*/
template <OperationType OpType_>
long Encoder<OpType_>::compute_buffer_bytesize() {
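// sz1 covers the self-attention workspace: the fused QKV projection (3 blocks of batch*step*hidden)
// plus Q, K and V (3 more blocks) and the attention-score matrix (batch*heads*step*step);
// sz2 covers the two FFN buffers. The two phases reuse the same memory, so only the max is needed.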
long sz1 = _max_batch_dim * 6 +
_max_batch_size * _tw._head_num * _tw._max_step * _tw._max_step;
long sz2 = _max_batch_dim + _max_batch_size * _tw._max_step * _tw._inner_size;
return max(sz1, sz2) * sizeof(_DataType);
}
/**
Init the GPU memory pointer which point to
the memory buffer needed by encoder.
These buffer are used during custom cuda kernel function,
find the corresponding function to see how these buffer are used
*/
template <OperationType OpType_>
void Encoder<OpType_>::init_buffer(void *pbuf) {
_DataType *p_d_buf = reinterpret_cast<_DataType *>(pbuf);
_p_d_qkv_projected = p_d_buf;
_p_d_q = _p_d_qkv_projected + _max_batch_dim * 3;
_p_d_k = _p_d_q + _max_batch_dim;
_p_d_v = _p_d_k + _max_batch_dim;
_p_d_c = _p_d_v + _max_batch_dim;
_p_d_ffn_buf1 = p_d_buf;
_p_d_ffn_buf2 = _p_d_ffn_buf1 + _max_batch_dim;
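// The FFN buffers alias the start of the same allocation used by the attention buffers above;
// this is safe because self_attention() has written its result into _p_d_output before
// ffn_add_norm() runs.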
return;
}
/**
Some requirements imposed by the custom CUDA kernel functions
*/
template <OperationType OpType_>
std::string Encoder<OpType_>::check() {
// if (_max_thread_per_block < _tw._hidden_size) {
// return "violate hidden_size <= max_thread_per_block";
// }
if (_tw._inner_size & 1) {
return "violate inner_size % 2 = 0";
}
if (_tw._dim_per_head & 1) {
return "violate dim_per_head % 2 = 0";
}
if (_p_d_src_emb_wei.size() != 4) {
return "violate p_d_src_emb_wei.size() = 4";
}
if (_p_d_enc_wei.size() != _tw._weight_per_enc_layer * _tw._n_enc_layer) {
return "violate p_d_enc_wei.size() = weight_per_enc_layer * n_enc_layer";
}
return "";
}
/**
Encoder inference
*/
template <OperationType OpType_>
void Encoder<OpType_>::run_one_infer(int batch_size, int batch_seq_len) {
/* ---step1. init--- */
_batch_size = batch_size;
_batch_seq_len = batch_seq_len;
_batch_token_num = batch_size * batch_seq_len;
#ifdef DEBUG_RESULT
std::cout << "batch_size-" << batch_size << " batch_seq_len-" << batch_seq_len
<< std::endl;
print_vec(_p_d_token_id, "batch_token_ids", batch_size * batch_seq_len);
#endif
/* ---step2. encoder feedforward--- */
ker_enc_embedding_launcher<_DataType>(
batch_size, batch_seq_len, _tw._hidden_size, _stream, _p_d_src_emb_wei[0],
_p_d_src_emb_wei[1], _p_d_token_id, _p_d_output, _p_d_padding_mask,
_tw._padding_id, _max_thread_per_block);
#ifdef DEBUG_RESULT
print_vec(_p_d_output, "encoder embedding(head):", 5);
print_vec(_p_d_output + _batch_token_num * _tw._hidden_size - 5,
"encoder embedding(tail):", 5);
#endif
for (_layer_id = 0; _layer_id < _tw._n_enc_layer; _layer_id++) {
_weight_offset = _layer_id * _tw._weight_per_enc_layer;
self_attention();
ffn_add_norm();
}
// last layer norm
ker_norm_layer_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_output,
_p_d_src_emb_wei[2], _p_d_src_emb_wei[3], _max_thread_per_block);
#ifdef DEBUG_RESULT
for (int i = 0; i < _batch_size; i++) { // batch_id
for (int j = 0; j < _batch_seq_len; j++) { // token_id
std::cout << "encoder output: token-" << j << std::endl;
print_vec(_p_d_output + i * _batch_seq_len * _tw._hidden_size +
j * _tw._hidden_size,
"encoder_output", _tw._dim_per_head);
}
} // not normal
#endif
return;
}
/**
Encoder self attention
*/
template <OperationType OpType_>
void Encoder<OpType_>::self_attention() {
/* ---step 0. layer_norm, add output_bias to "query"--- */
ker_norm_layer_resual_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_q,
_p_d_enc_wei[_weight_offset], _p_d_enc_wei[_weight_offset + 1],
_p_d_enc_wei[_weight_offset + 5], _max_thread_per_block, _tw._is_post_ln);
/* ---step 1. qkv = ori_q * qkv_wei + bias, and reshape qkv for multi-head
* gemm--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size * 3, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 2], _AType,
_tw._hidden_size * 3, _p_d_q, _BType, _tw._hidden_size, &_fzero,
_p_d_qkv_projected, _CType, _tw._hidden_size * 3, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// get q, k, v by split and reshape qkv
ker_arrange_encself_qkv_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_qkv_projected,
_p_d_enc_wei[_weight_offset + 3], _p_d_q, _max_batch_dim, _batch_seq_len,
_tw._dim_per_head, _tw._head_num, _max_thread_per_block);
/* ---step 2. correlation = q * k, perform softmax on correlation--- */
CHECK_GPU_ERROR(cublasGemmStridedBatchedEx(
_hd, CUBLAS_OP_T, CUBLAS_OP_N, _batch_seq_len, _batch_seq_len,
_tw._dim_per_head, &_atten_scaler, _p_d_k, _AType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, _p_d_q, _BType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, &_fzero, _p_d_c, _CType,
_batch_seq_len, _batch_seq_len * _batch_seq_len,
_batch_size * _tw._head_num, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
ker_correlation_softmax_encself_launcher<_DataType>(
_batch_size, _batch_seq_len, _tw._head_num, _stream, _p_d_c,
_p_d_padding_mask);
/* ---step 3. new_q = correlation * v--- */
CHECK_GPU_ERROR(cublasGemmStridedBatchedEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._dim_per_head, _batch_seq_len,
_batch_seq_len, &_fone, _p_d_v, _AType, _tw._dim_per_head,
_batch_seq_len * _tw._dim_per_head, _p_d_c, _BType, _batch_seq_len,
_batch_seq_len * _batch_seq_len, &_fzero, _p_d_q, _CType,
_tw._dim_per_head, _batch_seq_len * _tw._dim_per_head,
_batch_size * _tw._head_num, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// use v to store the reshaped q, since they are the same size and v
// will not be used again before the next multi-head attention
ker_arrange_atten_output_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_q, _p_d_v,
_batch_seq_len, _tw._dim_per_head, _tw._head_num, _max_thread_per_block);
/* ---step 4. new_q = ori_q + new_q * output_wei--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 4], _AType,
_tw._hidden_size, _p_d_v, _BType, _tw._hidden_size, &_fone, _p_d_output,
_CType, _tw._hidden_size, _computeType, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
return;
}
template <OperationType OpType_>
void Encoder<OpType_>::ffn_add_norm() {
/* ---step 0. layer_norm, add output_bias to "query"--- */
ker_norm_layer_resual_launcher<_DataType>(
_batch_token_num, _tw._hidden_size, _stream, _p_d_output, _p_d_ffn_buf1,
_p_d_enc_wei[_weight_offset + 6], _p_d_enc_wei[_weight_offset + 7],
_p_d_enc_wei[_weight_offset + 11], _max_thread_per_block,
_tw._is_post_ln);
/* ---step 1. first ffn layer--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._inner_size, _batch_token_num,
_tw._hidden_size, &_fone, _p_d_enc_wei[_weight_offset + 8], _AType,
_tw._inner_size, _p_d_ffn_buf1, _BType, _tw._hidden_size, &_fzero,
_p_d_ffn_buf2, _CType, _tw._inner_size, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
if (_tw._use_gelu) {
ker_bias_gelu_launcher<_DataType>(
_batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2,
_p_d_enc_wei[_weight_offset + 9], _tw._inner_size);
} else {
ker_bias_relu_launcher<_DataType>(
_batch_token_num, _max_thread_per_block, _stream, _p_d_ffn_buf2,
_p_d_enc_wei[_weight_offset + 9], _tw._inner_size);
}
/* ---step 2. second ffn layer--- */
CHECK_GPU_ERROR(cublasGemmEx(
_hd, CUBLAS_OP_N, CUBLAS_OP_N, _tw._hidden_size, _batch_token_num,
_tw._inner_size, &_fone, _p_d_enc_wei[_weight_offset + 10], _AType,
_tw._hidden_size, _p_d_ffn_buf2, _BType, _tw._inner_size, &_fone,
_p_d_output, _CType, _tw._hidden_size, _computeType,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
return;
}
template class Encoder<OperationType::FP16>;
template class Encoder<OperationType::FP32>;
} // namespace cuda
} // namespace lightseq
|
f9ec24c29834bc37a1e65b075c98d96b824d7461.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ---------------------------------------------------------
// Author: Andy Zeng, Princeton University, 2016
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include <vector>
#include <cstdlib>
#include <cstring>
#include "utils.hpp"
// CUDA kernel function to integrate a TSDF voxel volume given depth images
__global__
void Integrate(float * cam_K, float * cam2base, float * depth_im,
int im_height, int im_width, int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z, float voxel_size, float trunc_margin,
float * voxel_grid_TSDF, float * voxel_grid_weight) {
int pt_grid_z = blockIdx.x;
int pt_grid_y = threadIdx.x;
for (int pt_grid_x = 0; pt_grid_x < voxel_grid_dim_x; ++pt_grid_x) {
// Convert voxel center from grid coordinates to base frame camera coordinates
float pt_base_x = voxel_grid_origin_x + pt_grid_x * voxel_size;
float pt_base_y = voxel_grid_origin_y + pt_grid_y * voxel_size;
float pt_base_z = voxel_grid_origin_z + pt_grid_z * voxel_size;
// Convert from base frame camera coordinates to current frame camera coordinates
float tmp_pt[3] = {0};
tmp_pt[0] = pt_base_x - cam2base[0 * 4 + 3];
tmp_pt[1] = pt_base_y - cam2base[1 * 4 + 3];
tmp_pt[2] = pt_base_z - cam2base[2 * 4 + 3];
float pt_cam_x = cam2base[0 * 4 + 0] * tmp_pt[0] + cam2base[1 * 4 + 0] * tmp_pt[1] + cam2base[2 * 4 + 0] * tmp_pt[2];
float pt_cam_y = cam2base[0 * 4 + 1] * tmp_pt[0] + cam2base[1 * 4 + 1] * tmp_pt[1] + cam2base[2 * 4 + 1] * tmp_pt[2];
float pt_cam_z = cam2base[0 * 4 + 2] * tmp_pt[0] + cam2base[1 * 4 + 2] * tmp_pt[1] + cam2base[2 * 4 + 2] * tmp_pt[2];
if (pt_cam_z <= 0)
continue;
int pt_pix_x = roundf(cam_K[0 * 3 + 0] * (pt_cam_x / pt_cam_z) + cam_K[0 * 3 + 2]);
int pt_pix_y = roundf(cam_K[1 * 3 + 1] * (pt_cam_y / pt_cam_z) + cam_K[1 * 3 + 2]);
if (pt_pix_x < 0 || pt_pix_x >= im_width || pt_pix_y < 0 || pt_pix_y >= im_height)
continue;
float depth_val = depth_im[pt_pix_y * im_width + pt_pix_x];
if (depth_val <= 0 || depth_val > 6)
continue;
float diff = depth_val - pt_cam_z;
if (diff <= -trunc_margin)
continue;
// Integrate
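// Clamp the signed distance to at most one truncation margin and fold it into the voxel with a
// running average in which every observation carries unit weight.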
int volume_idx = pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x;
float dist = fmin(1.0f, diff / trunc_margin);
float weight_old = voxel_grid_weight[volume_idx];
float weight_new = weight_old + 1.0f;
voxel_grid_weight[volume_idx] = weight_new;
voxel_grid_TSDF[volume_idx] = (voxel_grid_TSDF[volume_idx] * weight_old + dist) / weight_new;
}
}
// Reads a sequence of depth images and camera poses and fuses them into a TSDF voxel volume
// (with the defaults below: 500 x 500 x 500 voxels at 2 mm resolution, i.e. a 1 m cube)
// The volume lives in the world frame of the supplied camera poses, since base2world is set to the identity
int main(int argc, char * argv[]) {
std::cout << "running tsdf fusion " << std::endl;
// Location of folder containing RGB-D frames and camera pose files
std::string data_path;
// location of camera intrinsics file
std::string cam_K_file;
float cam_K[3 * 3];
float base2world[4 * 4];
float cam2base[4 * 4];
float cam2world[4 * 4];
int im_width = 640;
int im_height = 480;
float depth_im[im_height * im_width];
// Voxel grid parameters (change these to change voxel grid resolution, etc.)
float voxel_grid_origin_x = 0.4f; // Location of voxel grid origin in base frame camera coordinates
float voxel_grid_origin_y = -0.3f;
float voxel_grid_origin_z = -0.2f;
float voxel_size = 0.002f;
float trunc_margin = voxel_size * 5;
int voxel_grid_dim_x = 500;
int voxel_grid_dim_y = 500;
int voxel_grid_dim_z = 500;
// Manual parameters
if (argc > 1) {
std::cout << "parsing data path\n";
std::cout << "argc " << argc << std::endl;
data_path = argv[1];
cam_K_file = argv[2];
}
if (argc > 3){
std::cout << "parsing additional parameters\n";
int counter = 3;
voxel_size = atof(argv[counter]);
counter++;
voxel_grid_dim_x = std::atoi(argv[counter]);
counter++;
voxel_grid_dim_y = std::atoi(argv[counter]);
counter++;
voxel_grid_dim_z = std::atoi(argv[counter]);
counter++;
voxel_grid_origin_x = std::atof(argv[counter]);
counter++;
voxel_grid_origin_y = std::atof(argv[counter]);
counter++;
voxel_grid_origin_z = std::atof(argv[counter]);
counter++;
std::cout << "finished parsing params\n";
}
trunc_margin = 5 * voxel_size;
std::cout << "data_path "<< data_path << std::endl;
// Read camera intrinsics
std::vector<float> cam_K_vec = LoadMatrixFromFile(cam_K_file, 3, 3);
std::copy(cam_K_vec.begin(), cam_K_vec.end(), cam_K);
// set base2world to be the identity
for(int i = 0; i < 16; i++){
base2world[i] = 0;
}
base2world[0] = 1;
base2world[5] = 1;
base2world[10] = 1;
base2world[15] = 1;
// for(int i = 0; i < 16; i++){
// std::cout << "base2world " << i << " = " << base2world[i] << std::endl;
// }
// Invert base frame camera pose to get world-to-base frame transform
float base2world_inv[16] = {0};
invert_matrix(base2world, base2world_inv);
// Initialize voxel grid
float * voxel_grid_TSDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
float * voxel_grid_weight = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
voxel_grid_TSDF[i] = 1.0f;
memset(voxel_grid_weight, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
// Load variables to GPU memory
float * gpu_voxel_grid_TSDF;
float * gpu_voxel_grid_weight;
hipMalloc(&gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
hipMalloc(&gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
checkCUDA(__LINE__, hipGetLastError());
hipMemcpy(gpu_voxel_grid_TSDF, voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_voxel_grid_weight, voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyHostToDevice);
checkCUDA(__LINE__, hipGetLastError());
float * gpu_cam_K;
float * gpu_cam2base;
float * gpu_depth_im;
hipMalloc(&gpu_cam_K, 3 * 3 * sizeof(float));
hipMemcpy(gpu_cam_K, cam_K, 3 * 3 * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(&gpu_cam2base, 4 * 4 * sizeof(float));
hipMalloc(&gpu_depth_im, im_height * im_width * sizeof(float));
checkCUDA(__LINE__, hipGetLastError());
// Loop through each depth frame and integrate TSDF voxel grid
int frame_idx = 0;
while(true){
std::ostringstream curr_frame_prefix;
curr_frame_prefix << std::setw(6) << std::setfill('0') << frame_idx;
// Read base frame camera pose
std::string cam2world_file = data_path + "/" + curr_frame_prefix.str() + "_pose.txt";
// check if file exists, if not return
std::ifstream ifile(cam2world_file);
if (ifile.fail()) {
// The file doesn't exist, break out of while loop
break;
}
std::vector<float> cam2world_vec = LoadMatrixFromFile(cam2world_file, 4, 4);
std::copy(cam2world_vec.begin(), cam2world_vec.end(), cam2world);
// // Read current frame depth
std::string depth_im_file = data_path + "/" + curr_frame_prefix.str() + "_depth.png";
ReadDepth(depth_im_file, im_height, im_width, depth_im);
// Compute relative camera pose (camera-to-base frame)
multiply_matrix(base2world_inv, cam2world, cam2base);
hipMemcpy(gpu_cam2base, cam2base, 4 * 4 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(gpu_depth_im, depth_im, im_height * im_width * sizeof(float), hipMemcpyHostToDevice);
checkCUDA(__LINE__, hipGetLastError());
std::cout << "Fusing: " << depth_im_file << std::endl;
hipLaunchKernelGGL(( Integrate) , dim3(voxel_grid_dim_z), dim3(voxel_grid_dim_y) , 0, 0, gpu_cam_K, gpu_cam2base, gpu_depth_im,
im_height, im_width, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z, voxel_size, trunc_margin,
gpu_voxel_grid_TSDF, gpu_voxel_grid_weight);
frame_idx++;
}
// Load TSDF voxel grid from GPU to CPU memory
hipMemcpy(voxel_grid_TSDF, gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(voxel_grid_weight, gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), hipMemcpyDeviceToHost);
checkCUDA(__LINE__, hipGetLastError());
std::string tsdf_ply_filename = data_path + "/tsdf.ply";
std::string tsdf_bin_filename = data_path + "/tsdf.bin";
// Compute surface points from TSDF voxel grid and save to point cloud .ply file
std::cout << "Saving surface point cloud (tsdf.ply)..." << std::endl;
std::cout << "tsdf_bin_filename " << tsdf_bin_filename << std::endl;
SaveVoxelGrid2SurfacePointCloud(tsdf_ply_filename, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_size, voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_grid_TSDF, voxel_grid_weight, 0.2f, 0.0f);
// Save TSDF voxel grid and its parameters to disk as binary file (float array)
std::cout << "Saving TSDF voxel grid values to disk (tsdf.bin)..." << std::endl;
std::ofstream outFile(tsdf_bin_filename, std::ios::binary | std::ios::out);
float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
outFile.write((char*)&voxel_grid_dim_xf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_yf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_zf, sizeof(float));
outFile.write((char*)&voxel_grid_origin_x, sizeof(float));
outFile.write((char*)&voxel_grid_origin_y, sizeof(float));
outFile.write((char*)&voxel_grid_origin_z, sizeof(float));
outFile.write((char*)&voxel_size, sizeof(float));
outFile.write((char*)&trunc_margin, sizeof(float));
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
outFile.write((char*)&voxel_grid_TSDF[i], sizeof(float));
outFile.close();
return 0;
}
| f9ec24c29834bc37a1e65b075c98d96b824d7461.cu | // ---------------------------------------------------------
// Author: Andy Zeng, Princeton University, 2016
// ---------------------------------------------------------
#include <iostream>
#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include <vector>
#include <cstdlib>
#include <cstring>
#include "utils.hpp"
// CUDA kernel function to integrate a TSDF voxel volume given depth images
__global__
void Integrate(float * cam_K, float * cam2base, float * depth_im,
int im_height, int im_width, int voxel_grid_dim_x, int voxel_grid_dim_y, int voxel_grid_dim_z,
float voxel_grid_origin_x, float voxel_grid_origin_y, float voxel_grid_origin_z, float voxel_size, float trunc_margin,
float * voxel_grid_TSDF, float * voxel_grid_weight) {
int pt_grid_z = blockIdx.x;
int pt_grid_y = threadIdx.x;
for (int pt_grid_x = 0; pt_grid_x < voxel_grid_dim_x; ++pt_grid_x) {
// Convert voxel center from grid coordinates to base frame camera coordinates
float pt_base_x = voxel_grid_origin_x + pt_grid_x * voxel_size;
float pt_base_y = voxel_grid_origin_y + pt_grid_y * voxel_size;
float pt_base_z = voxel_grid_origin_z + pt_grid_z * voxel_size;
// Convert from base frame camera coordinates to current frame camera coordinates
float tmp_pt[3] = {0};
tmp_pt[0] = pt_base_x - cam2base[0 * 4 + 3];
tmp_pt[1] = pt_base_y - cam2base[1 * 4 + 3];
tmp_pt[2] = pt_base_z - cam2base[2 * 4 + 3];
float pt_cam_x = cam2base[0 * 4 + 0] * tmp_pt[0] + cam2base[1 * 4 + 0] * tmp_pt[1] + cam2base[2 * 4 + 0] * tmp_pt[2];
float pt_cam_y = cam2base[0 * 4 + 1] * tmp_pt[0] + cam2base[1 * 4 + 1] * tmp_pt[1] + cam2base[2 * 4 + 1] * tmp_pt[2];
float pt_cam_z = cam2base[0 * 4 + 2] * tmp_pt[0] + cam2base[1 * 4 + 2] * tmp_pt[1] + cam2base[2 * 4 + 2] * tmp_pt[2];
if (pt_cam_z <= 0)
continue;
int pt_pix_x = roundf(cam_K[0 * 3 + 0] * (pt_cam_x / pt_cam_z) + cam_K[0 * 3 + 2]);
int pt_pix_y = roundf(cam_K[1 * 3 + 1] * (pt_cam_y / pt_cam_z) + cam_K[1 * 3 + 2]);
if (pt_pix_x < 0 || pt_pix_x >= im_width || pt_pix_y < 0 || pt_pix_y >= im_height)
continue;
float depth_val = depth_im[pt_pix_y * im_width + pt_pix_x];
if (depth_val <= 0 || depth_val > 6)
continue;
float diff = depth_val - pt_cam_z;
if (diff <= -trunc_margin)
continue;
// Integrate
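// Clamp the signed distance to at most one truncation margin and fold it into the voxel with a
// running average in which every observation carries unit weight.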
int volume_idx = pt_grid_z * voxel_grid_dim_y * voxel_grid_dim_x + pt_grid_y * voxel_grid_dim_x + pt_grid_x;
float dist = fmin(1.0f, diff / trunc_margin);
float weight_old = voxel_grid_weight[volume_idx];
float weight_new = weight_old + 1.0f;
voxel_grid_weight[volume_idx] = weight_new;
voxel_grid_TSDF[volume_idx] = (voxel_grid_TSDF[volume_idx] * weight_old + dist) / weight_new;
}
}
// Reads a sequence of depth images and camera poses and fuses them into a TSDF voxel volume
// (with the defaults below: 500 x 500 x 500 voxels at 2 mm resolution, i.e. a 1 m cube)
// The volume lives in the world frame of the supplied camera poses, since base2world is set to the identity
int main(int argc, char * argv[]) {
std::cout << "running tsdf fusion " << std::endl;
// Location of folder containing RGB-D frames and camera pose files
std::string data_path;
// location of camera intrinsics file
std::string cam_K_file;
float cam_K[3 * 3];
float base2world[4 * 4];
float cam2base[4 * 4];
float cam2world[4 * 4];
int im_width = 640;
int im_height = 480;
float depth_im[im_height * im_width];
// Voxel grid parameters (change these to change voxel grid resolution, etc.)
float voxel_grid_origin_x = 0.4f; // Location of voxel grid origin in base frame camera coordinates
float voxel_grid_origin_y = -0.3f;
float voxel_grid_origin_z = -0.2f;
float voxel_size = 0.002f;
float trunc_margin = voxel_size * 5;
int voxel_grid_dim_x = 500;
int voxel_grid_dim_y = 500;
int voxel_grid_dim_z = 500;
// Manual parameters
if (argc > 1) {
std::cout << "parsing data path\n";
std::cout << "argc " << argc << std::endl;
data_path = argv[1];
cam_K_file = argv[2];
}
if (argc > 3){
std::cout << "parsing additional parameters\n";
int counter = 3;
voxel_size = atof(argv[counter]);
counter++;
voxel_grid_dim_x = std::atoi(argv[counter]);
counter++;
voxel_grid_dim_y = std::atoi(argv[counter]);
counter++;
voxel_grid_dim_z = std::atoi(argv[counter]);
counter++;
voxel_grid_origin_x = std::atof(argv[counter]);
counter++;
voxel_grid_origin_y = std::atof(argv[counter]);
counter++;
voxel_grid_origin_z = std::atof(argv[counter]);
counter++;
std::cout << "finished parsing params\n";
}
trunc_margin = 5 * voxel_size;
std::cout << "data_path "<< data_path << std::endl;
// Read camera intrinsics
std::vector<float> cam_K_vec = LoadMatrixFromFile(cam_K_file, 3, 3);
std::copy(cam_K_vec.begin(), cam_K_vec.end(), cam_K);
// set base2world to be the identity
for(int i = 0; i < 16; i++){
base2world[i] = 0;
}
base2world[0] = 1;
base2world[5] = 1;
base2world[10] = 1;
base2world[15] = 1;
// for(int i = 0; i < 16; i++){
// std::cout << "base2world " << i << " = " << base2world[i] << std::endl;
// }
// Invert base frame camera pose to get world-to-base frame transform
float base2world_inv[16] = {0};
invert_matrix(base2world, base2world_inv);
// Initialize voxel grid
float * voxel_grid_TSDF = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
float * voxel_grid_weight = new float[voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z];
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
voxel_grid_TSDF[i] = 1.0f;
memset(voxel_grid_weight, 0, sizeof(float) * voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z);
// Load variables to GPU memory
float * gpu_voxel_grid_TSDF;
float * gpu_voxel_grid_weight;
cudaMalloc(&gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
cudaMalloc(&gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float));
checkCUDA(__LINE__, cudaGetLastError());
cudaMemcpy(gpu_voxel_grid_TSDF, voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_voxel_grid_weight, voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyHostToDevice);
checkCUDA(__LINE__, cudaGetLastError());
float * gpu_cam_K;
float * gpu_cam2base;
float * gpu_depth_im;
cudaMalloc(&gpu_cam_K, 3 * 3 * sizeof(float));
cudaMemcpy(gpu_cam_K, cam_K, 3 * 3 * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(&gpu_cam2base, 4 * 4 * sizeof(float));
cudaMalloc(&gpu_depth_im, im_height * im_width * sizeof(float));
checkCUDA(__LINE__, cudaGetLastError());
// Loop through each depth frame and integrate TSDF voxel grid
int frame_idx = 0;
while(true){
std::ostringstream curr_frame_prefix;
curr_frame_prefix << std::setw(6) << std::setfill('0') << frame_idx;
// Read base frame camera pose
std::string cam2world_file = data_path + "/" + curr_frame_prefix.str() + "_pose.txt";
// check if file exists, if not return
std::ifstream ifile(cam2world_file);
if (ifile.fail()) {
// The file doesn't exist, break out of while loop
break;
}
std::vector<float> cam2world_vec = LoadMatrixFromFile(cam2world_file, 4, 4);
std::copy(cam2world_vec.begin(), cam2world_vec.end(), cam2world);
// // Read current frame depth
std::string depth_im_file = data_path + "/" + curr_frame_prefix.str() + "_depth.png";
ReadDepth(depth_im_file, im_height, im_width, depth_im);
// Compute relative camera pose (camera-to-base frame)
multiply_matrix(base2world_inv, cam2world, cam2base);
cudaMemcpy(gpu_cam2base, cam2base, 4 * 4 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_depth_im, depth_im, im_height * im_width * sizeof(float), cudaMemcpyHostToDevice);
checkCUDA(__LINE__, cudaGetLastError());
std::cout << "Fusing: " << depth_im_file << std::endl;
Integrate <<< voxel_grid_dim_z, voxel_grid_dim_y >>>(gpu_cam_K, gpu_cam2base, gpu_depth_im,
im_height, im_width, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z, voxel_size, trunc_margin,
gpu_voxel_grid_TSDF, gpu_voxel_grid_weight);
frame_idx++;
}
// Load TSDF voxel grid from GPU to CPU memory
cudaMemcpy(voxel_grid_TSDF, gpu_voxel_grid_TSDF, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(voxel_grid_weight, gpu_voxel_grid_weight, voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDA(__LINE__, cudaGetLastError());
std::string tsdf_ply_filename = data_path + "/tsdf.ply";
std::string tsdf_bin_filename = data_path + "/tsdf.bin";
// Compute surface points from TSDF voxel grid and save to point cloud .ply file
std::cout << "Saving surface point cloud (tsdf.ply)..." << std::endl;
std::cout << "tsdf_bin_filename " << tsdf_bin_filename << std::endl;
SaveVoxelGrid2SurfacePointCloud(tsdf_ply_filename, voxel_grid_dim_x, voxel_grid_dim_y, voxel_grid_dim_z,
voxel_size, voxel_grid_origin_x, voxel_grid_origin_y, voxel_grid_origin_z,
voxel_grid_TSDF, voxel_grid_weight, 0.2f, 0.0f);
// Save TSDF voxel grid and its parameters to disk as binary file (float array)
std::cout << "Saving TSDF voxel grid values to disk (tsdf.bin)..." << std::endl;
std::ofstream outFile(tsdf_bin_filename, std::ios::binary | std::ios::out);
float voxel_grid_dim_xf = (float) voxel_grid_dim_x;
float voxel_grid_dim_yf = (float) voxel_grid_dim_y;
float voxel_grid_dim_zf = (float) voxel_grid_dim_z;
outFile.write((char*)&voxel_grid_dim_xf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_yf, sizeof(float));
outFile.write((char*)&voxel_grid_dim_zf, sizeof(float));
outFile.write((char*)&voxel_grid_origin_x, sizeof(float));
outFile.write((char*)&voxel_grid_origin_y, sizeof(float));
outFile.write((char*)&voxel_grid_origin_z, sizeof(float));
outFile.write((char*)&voxel_size, sizeof(float));
outFile.write((char*)&trunc_margin, sizeof(float));
for (int i = 0; i < voxel_grid_dim_x * voxel_grid_dim_y * voxel_grid_dim_z; ++i)
outFile.write((char*)&voxel_grid_TSDF[i], sizeof(float));
outFile.close();
return 0;
}
|
e173656ff23f695da30d7e0b547cbdb62262f33a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//See: https://devblogs.nvidia.com/parallelforall/how-access-global-memory-efficiently-cuda-c-kernels/
#include <stdio.h>
#include <assert.h>
#define DEBUG
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
template <typename T>
__global__ void offset(T* a, int s)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + s;
a[i] = a[i] + 1;
}
template <typename T>
__global__ void stride(T* a, int s)
{
int i = (blockDim.x * blockIdx.x + threadIdx.x) * s;
a[i] = a[i] + 1;
}
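// The offset kernel above shifts every thread's access by s elements, probing misaligned (but
// still unit-stride) accesses; the stride kernel spaces neighbouring threads s elements apart,
// probing the loss of coalescing as fewer useful bytes arrive per memory transaction. runTest
// below times both kernels over a range of s and reports effective bandwidth (read + write).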
template <typename T>
void runTest(int deviceId, int nMB)
{
int blockSize = 256;
float ms;
T *d_a;
hipEvent_t startEvent, stopEvent;
int n = nMB*1024*1024/sizeof(T);
// NB: d_a(33*nMB) for stride case
checkCuda( hipMalloc(&d_a, n * 33 * sizeof(T)) );
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
printf("Offset, Bandwidth (GB/s):\n");
hipLaunchKernelGGL(( offset), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, 0); // warm up
for (int i = 0; i <= 32; i++) {
checkCuda( hipMemset(d_a, 0, n * sizeof(T)) );
checkCuda( hipEventRecord(startEvent,0) );
hipLaunchKernelGGL(( offset), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, i);
checkCuda( hipEventRecord(stopEvent,0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("%d, %f\n", i, 2*nMB/ms);
}
printf("\n");
printf("Stride, Bandwidth (GB/s):\n");
hipLaunchKernelGGL(( stride), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, 1); // warm up
for (int i = 1; i <= 32; i++) {
checkCuda( hipMemset(d_a, 0, n * sizeof(T)) );
checkCuda( hipEventRecord(startEvent,0) );
hipLaunchKernelGGL(( stride), dim3(n/blockSize), dim3(blockSize), 0, 0, d_a, i);
checkCuda( hipEventRecord(stopEvent,0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent) );
printf("%d, %f\n", i, 2*nMB/ms);
}
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
hipFree(d_a);
}
int main(int argc, char **argv)
{
int nMB = 4;
int deviceId = 0;
bool bFp64 = false;
for (int i = 1; i < argc; i++) {
if (!strncmp(argv[i], "dev=", 4))
deviceId = atoi((char*)(&argv[i][4]));
else if (!strcmp(argv[i], "fp64"))
bFp64 = true;
}
hipDeviceProp_t prop;
checkCuda( hipSetDevice(deviceId));
checkCuda( hipGetDeviceProperties(&prop, deviceId) );
printf("Device: %s\n", prop.name);
printf("Transfer size (MB): %d\n", nMB);
printf("%s Precision\n", bFp64 ? "Double" : "Single");
if (bFp64) runTest<double>(deviceId, nMB);
else runTest<float>(deviceId, nMB);
}
| e173656ff23f695da30d7e0b547cbdb62262f33a.cu | //See: https://devblogs.nvidia.com/parallelforall/how-access-global-memory-efficiently-cuda-c-kernels/
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#define DEBUG
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
template <typename T>
__global__ void offset(T* a, int s)
{
int i = blockDim.x * blockIdx.x + threadIdx.x + s;
a[i] = a[i] + 1;
}
template <typename T>
__global__ void stride(T* a, int s)
{
int i = (blockDim.x * blockIdx.x + threadIdx.x) * s;
a[i] = a[i] + 1;
}
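// The offset kernel above shifts every thread's access by s elements, probing misaligned (but
// still unit-stride) accesses; the stride kernel spaces neighbouring threads s elements apart,
// probing the loss of coalescing as fewer useful bytes arrive per memory transaction. runTest
// below times both kernels over a range of s and reports effective bandwidth (read + write).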
template <typename T>
void runTest(int deviceId, int nMB)
{
int blockSize = 256;
float ms;
T *d_a;
cudaEvent_t startEvent, stopEvent;
int n = nMB*1024*1024/sizeof(T);
// NB: d_a(33*nMB) for stride case
checkCuda( cudaMalloc(&d_a, n * 33 * sizeof(T)) );
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
printf("Offset, Bandwidth (GB/s):\n");
offset<<<n/blockSize, blockSize>>>(d_a, 0); // warm up
for (int i = 0; i <= 32; i++) {
checkCuda( cudaMemset(d_a, 0, n * sizeof(T)) );
checkCuda( cudaEventRecord(startEvent,0) );
offset<<<n/blockSize, blockSize>>>(d_a, i);
checkCuda( cudaEventRecord(stopEvent,0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("%d, %f\n", i, 2*nMB/ms);
}
printf("\n");
printf("Stride, Bandwidth (GB/s):\n");
stride<<<n/blockSize, blockSize>>>(d_a, 1); // warm up
for (int i = 1; i <= 32; i++) {
checkCuda( cudaMemset(d_a, 0, n * sizeof(T)) );
checkCuda( cudaEventRecord(startEvent,0) );
stride<<<n/blockSize, blockSize>>>(d_a, i);
checkCuda( cudaEventRecord(stopEvent,0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
printf("%d, %f\n", i, 2*nMB/ms);
}
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
cudaFree(d_a);
}
int main(int argc, char **argv)
{
int nMB = 4;
int deviceId = 0;
bool bFp64 = false;
for (int i = 1; i < argc; i++) {
if (!strncmp(argv[i], "dev=", 4))
deviceId = atoi((char*)(&argv[i][4]));
else if (!strcmp(argv[i], "fp64"))
bFp64 = true;
}
cudaDeviceProp prop;
checkCuda( cudaSetDevice(deviceId));
checkCuda( cudaGetDeviceProperties(&prop, deviceId) );
printf("Device: %s\n", prop.name);
printf("Transfer size (MB): %d\n", nMB);
printf("%s Precision\n", bFp64 ? "Double" : "Single");
if (bFp64) runTest<double>(deviceId, nMB);
else runTest<float>(deviceId, nMB);
}
|
ec62be6b8b93560ac7ab6c388e84909e54a3f28c.hip | // !!! This is a file automatically generated by hipify!!!
// #######################################################
//
// Exemplo (template) de multiplicação de matrizes em CUDA
// Disciplina: OPRP001 - Programação Paralela
// Prof.: Mauricio Pillon
//
// #######################################################
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
// Matriz Quadrada (nro_linhas = nro_colunas)
#define N \
4 // Número de linhas
// Número de colunas
// GPU: Multiplicação das matrizes (a) e (b), resultado em (c)
__global__ void matMult(int *da, int *db, int *dc) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
if (col < N && row < N) {
for (int i = 0; i < N; i++) {
sum += da[row * N + i] * db[i * N + col];
}
dc[row * N + col] = sum;
}
}
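// matMult above: each thread computes one element of C as the dot product of one row of A with
// one column of B. With the launch used in main (a single block of N x N threads) this only works
// while N*N stays within the per-block thread limit (1024 on current GPUs).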
// GPU: Imprime índices na matriz
__global__ void printIndex(void) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
printf("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n", i, j, (i * N + j),
threadIdx.x, blockIdx.x, blockDim.x, threadIdx.y, blockIdx.y,
blockDim.y);
}
// GPU: Inicializa os vetores (a), (b) e (c) na Memória Global
__global__ void dirtyMem(int *da, int *db, int *dc) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
da[i] = -1;
db[i] = -2;
dc[i] = -3;
}
// CPU: Inicializa os vetores (a) e (b)
__host__ void initvet(int *host_a, int *host_b) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
host_b[i * N + j] = (i + j) + ((N - 1) * i);
host_a[i * N + j] = (N * N) - host_b[i * N + j];
}
}
}
// CPU: Imprime matriz
__host__ void printMat(int *mat) {
for (int j = 0; j < N; j++)
printf("\t(%d)", j);
printf("\n");
for (int i = 0; i < N; i++) {
printf("(%d)", i);
for (int j = 0; j < N; j++) {
printf("\t%d", mat[i * N + j]);
}
printf("\n");
}
}
// CPU: função principal
int main(int argc, char const *argv[]) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size;
// Alocação de matriz quadrada
size = N * N * sizeof(int);
// Alocação de memória no host
hipHostMalloc((void **)&a, size);
hipHostMalloc((void **)&b, size);
hipHostMalloc((void **)&c, size);
// Alocação de memória na GPU para os vetores (a,b e c)
hipMalloc((void **)&dev_a, size);
hipMalloc((void **)&dev_b, size);
hipMalloc((void **)&dev_c, size);
// Atribui valores iniciais aos vetores em GPU
hipLaunchKernelGGL(( dirtyMem), dim3(N), dim3(N), 0, 0, dev_a, dev_b, dev_c);
// Cópia GPU para CPU
hipMemcpy(a, dev_a, size, hipMemcpyDeviceToHost);
hipMemcpy(b, dev_b, size, hipMemcpyDeviceToHost);
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
// Impressão na tela dos valores dos vetores
printf("\t ### Valores Inicializados na GPU ###\n");
printf("\t ### Matriz (a) ### \n");
printMat(a);
printf("\t ### Matriz (b) ### \n");
printMat(b);
printf("\t ### Matriz (c) ### \n");
printMat(c);
// Inicialização dos vetores (a) e (b) no host
initvet(a, b);
// Cópia dos vetores gerados em CPU p/ memória da GPU
hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
// Número de blocos e threads p/ dimensões (x,y)
dim3 dimBlock(1, 1);
dim3 dimThreads(N, N);
// Imprime as posições acessadas pelo dimBlock e dimThreads
hipLaunchKernelGGL(( printIndex), dim3(dimBlock), dim3(dimThreads), 0, 0, );
// Execução do kernel matMult em GPU
hipLaunchKernelGGL(( matMult), dim3(dimBlock), dim3(dimThreads), 0, 0, dev_a, dev_b, dev_c);
hipDeviceSynchronize();
// Cópia do vetor (c) da GPU (Memória Global) para CPU
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
  // Impressão na tela dos valores dos vetores
  printf("\t ### Valores após processamento em GPU ###\n");
printf("\t ### Matriz (a) ### \n");
printMat(a);
printf("\t ### Matriz (b) ### \n");
printMat(b);
printf("\t ### Matriz (c) ### \n");
printMat(c);
  // Libera a Memória Global (GPU)
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
  // Libera a Memória Global (CPU)
hipHostFree(a);
hipHostFree(b);
hipHostFree(c);
return 0;
}
| ec62be6b8b93560ac7ab6c388e84909e54a3f28c.cu | // #######################################################
//
// Exemplo (template) de multiplicação de matrizes em CUDA
// Disciplina: OPRP001 - Programação Paralela
// Prof.: Mauricio Pillon
//
// #######################################################
#include <cuda.h>
#include <math.h>
#include <stdio.h>
// Matriz Quadrada (nro_linhas = nro_colunas)
#define N \
4 // Número de linhas
// Número de colunas
// GPU: Multiplicação das matrizes (a) e (b), resultado em (c)
__global__ void matMult(int *da, int *db, int *dc) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
int k=0;
if (col < N && row < N) {
for (int i = 0; i < N; i++) {
sum += da[row * N + i] * db[i * N + col];
}
dc[row * N + col] = sum;
}
}
// GPU: Imprime índices na matriz
__global__ void printIndex(void) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
printf("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n", i, j, (i * N + j),
threadIdx.x, blockIdx.x, blockDim.x, threadIdx.y, blockIdx.y,
blockDim.y);
}
// GPU: Inicializa os vetores (a), (b) e (c) na Memória Global
__global__ void dirtyMem(int *da, int *db, int *dc) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
da[i] = -1;
db[i] = -2;
dc[i] = -3;
}
// CPU: Inicializa os vetores (a) e (b)
__host__ void initvet(int *host_a, int *host_b) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
host_b[i * N + j] = (i + j) + ((N - 1) * i);
host_a[i * N + j] = (N * N) - host_b[i * N + j];
}
}
}
// CPU: Imprime matriz
__host__ void printMat(int *mat) {
for (int j = 0; j < N; j++)
printf("\t(%d)", j);
printf("\n");
for (int i = 0; i < N; i++) {
printf("(%d)", i);
for (int j = 0; j < N; j++) {
printf("\t%d", mat[i * N + j]);
}
printf("\n");
}
}
// CPU: função principal
int main(int argc, char const *argv[]) {
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size;
// Alocação de matriz quadrada
size = N * N * sizeof(int);
// Alocação de memória no host
cudaMallocHost((void **)&a, size);
cudaMallocHost((void **)&b, size);
cudaMallocHost((void **)&c, size);
// Alocação de memória na GPU para os vetores (a,b e c)
cudaMalloc((void **)&dev_a, size);
cudaMalloc((void **)&dev_b, size);
cudaMalloc((void **)&dev_c, size);
// Atribui valores iniciais aos vetores em GPU
dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c);
// Cópia GPU para CPU
cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
// Impressão na tela dos valores dos vetores
printf("\t ### Valores Inicializados na GPU ###\n");
printf("\t ### Matriz (a) ### \n");
printMat(a);
printf("\t ### Matriz (b) ### \n");
printMat(b);
printf("\t ### Matriz (c) ### \n");
printMat(c);
// Inicialização dos vetores (a) e (b) no host
initvet(a, b);
// Cópia dos vetores gerados em CPU p/ memória da GPU
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
// Número de blocos e threads p/ dimensões (x,y)
dim3 dimBlock(1, 1);
dim3 dimThreads(N, N);
// Imprime as posições acessadas pelo dimBlock e dimThreads
printIndex<<<dimBlock, dimThreads>>>();
// Execução do kernel matMult em GPU
matMult<<<dimBlock, dimThreads>>>(dev_a, dev_b, dev_c);
cudaDeviceSynchronize();
// Cópia do vetor (c) da GPU (Memória Global) para CPU
cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
// Impressão na tela dos valores dos vetores
printf("\t ### Valores após processamento em GPU ###\n");
printf("\t ### Matriz (a) ### \n");
printMat(a);
printf("\t ### Matriz (b) ### \n");
printMat(b);
printf("\t ### Matriz (c) ### \n");
printMat(c);
// Libera a Memória Global (GPU)
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// Libera a Memória Global (CPU)
cudaFreeHost(a);
cudaFreeHost(b);
cudaFreeHost(c);
return 0;
}
|
ba2c487b3d0bd15a9ef0dd4538345b9b7438bb59.hip | // !!! This is a file automatically generated by hipify!!!
// Array multiplication: C = A * B:
// System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 64 // number of threads in each block
#endif
#ifndef DATASET_SIZE
#define DATASET_SIZE ( 8*1024*1024 ) // size of the array
#endif
float hA[DATASET_SIZE];
float hB[DATASET_SIZE];
float hC[DATASET_SIZE];
#ifndef TOL
#define TOL 0.00001f // tolerance to relative error
#endif
void
CudaCheckError()
{
hipError_t e = hipGetLastError();
if (e != hipSuccess)
{
fprintf(stderr, "CUDA failure %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(e));
}
}
// array multiplication on the device: C = A * B
__global__ void ArrayMul(float* A, float* B, float* C)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < DATASET_SIZE)
C[gid] = A[gid] * B[gid];
}
// main program:
int
main(int argc, char* argv[])
{
int dev = findCudaDevice(argc, (const char**)argv);
// fill host memory:
for (int i = 0; i < DATASET_SIZE; i++)
{
hA[i] = hB[i] = sqrtf((float)i);
}
// allocate device memory:
float* dA, * dB, * dC;
hipMalloc((void**)(&dA), sizeof(hA));
hipMalloc((void**)(&dB), sizeof(hB));
hipMalloc((void**)(&dC), sizeof(hC));
CudaCheckError();
// copy host memory to the device:
hipMemcpy(dA, hA, DATASET_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB, hB, DATASET_SIZE * sizeof(float), hipMemcpyHostToDevice);
CudaCheckError();
// setup the execution parameters:
dim3 grid(DATASET_SIZE / THREADS_PER_BLOCK, 1, 1);
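	// note: the integer division above assumes DATASET_SIZE is a multiple of THREADS_PER_BLOCK;
	// otherwise the trailing elements would get no threads and would never be computed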
dim3 threads(THREADS_PER_BLOCK, 1, 1);
// create and start the timer:
hipDeviceSynchronize();
// allocate the events that we'll use for timing:
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
CudaCheckError();
// record the start event:
hipEventRecord(start, NULL);
CudaCheckError();
// execute the kernel:
ArrayMul << < grid, threads >> > (dA, dB, dC);
// record the stop event:
hipEventRecord(stop, NULL);
CudaCheckError();
// wait for the stop event to complete:
hipEventSynchronize(stop);
CudaCheckError();
float msecTotal;
hipEventElapsedTime(&msecTotal, start, stop);
CudaCheckError();
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double multsPerSecond = (double)DATASET_SIZE / secondsTotal;
double megaMultsPerSecond = multsPerSecond / 1000000.;
fprintf(stderr, "%12d\t%4d\t%10.2lf\n", DATASET_SIZE, THREADS_PER_BLOCK, megaMultsPerSecond);
// copy result from the device to the host:
hipMemcpy(hC, dC, sizeof(hC), hipMemcpyDeviceToHost);
CudaCheckError();
// check for correctness:
fprintf(stderr, "Checking computed result for correctness:\n");
bool correct = true;
for (int i = 1; i < DATASET_SIZE; i++)
{
double error = ((double)hC[i] - (double)i) / (double)i;
if (fabs(error) > TOL)
{
fprintf(stderr, "C[%12d] = %10.2lf, correct = %10.2lf\n", i, (double)hC[i], (double)i);
correct = false;
}
}
if (correct)
fprintf(stderr, "PASS.\n");
else
fprintf(stderr, "FAIL.\n");
// clean up:
hipFree(dA);
hipFree(dB);
hipFree(dC);
CudaCheckError();
return 0;
}
| ba2c487b3d0bd15a9ef0dd4538345b9b7438bb59.cu |
// Array multiplication: C = A * B:
// System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 64 // number of threads in each block
#endif
#ifndef DATASET_SIZE
#define DATASET_SIZE ( 8*1024*1024 ) // size of the array
#endif
float hA[DATASET_SIZE];
float hB[DATASET_SIZE];
float hC[DATASET_SIZE];
#ifndef TOL
#define TOL 0.00001f // tolerance to relative error
#endif
void
CudaCheckError()
{
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess)
{
fprintf(stderr, "CUDA failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e));
}
}
// array multiplication on the device: C = A * B
__global__ void ArrayMul(float* A, float* B, float* C)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < DATASET_SIZE)
C[gid] = A[gid] * B[gid];
}
// main program:
int
main(int argc, char* argv[])
{
int dev = findCudaDevice(argc, (const char**)argv);
// fill host memory:
for (int i = 0; i < DATASET_SIZE; i++)
{
hA[i] = hB[i] = sqrtf((float)i);
}
// allocate device memory:
float* dA, * dB, * dC;
cudaMalloc((void**)(&dA), sizeof(hA));
cudaMalloc((void**)(&dB), sizeof(hB));
cudaMalloc((void**)(&dC), sizeof(hC));
CudaCheckError();
// copy host memory to the device:
cudaMemcpy(dA, hA, DATASET_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, DATASET_SIZE * sizeof(float), cudaMemcpyHostToDevice);
CudaCheckError();
// setup the execution parameters:
dim3 grid(DATASET_SIZE / THREADS_PER_BLOCK, 1, 1);
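	// note: the integer division above assumes DATASET_SIZE is a multiple of THREADS_PER_BLOCK;
	// otherwise the trailing elements would get no threads and would never be computed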
dim3 threads(THREADS_PER_BLOCK, 1, 1);
// create and start the timer:
cudaDeviceSynchronize();
// allocate the events that we'll use for timing:
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
CudaCheckError();
// record the start event:
cudaEventRecord(start, NULL);
CudaCheckError();
// execute the kernel:
ArrayMul << < grid, threads >> > (dA, dB, dC);
// record the stop event:
cudaEventRecord(stop, NULL);
CudaCheckError();
// wait for the stop event to complete:
cudaEventSynchronize(stop);
CudaCheckError();
float msecTotal;
cudaEventElapsedTime(&msecTotal, start, stop);
CudaCheckError();
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double multsPerSecond = (double)DATASET_SIZE / secondsTotal;
double megaMultsPerSecond = multsPerSecond / 1000000.;
fprintf(stderr, "%12d\t%4d\t%10.2lf\n", DATASET_SIZE, THREADS_PER_BLOCK, megaMultsPerSecond);
// copy result from the device to the host:
cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
CudaCheckError();
// check for correctness:
fprintf(stderr, "Checking computed result for correctness:\n");
bool correct = true;
for (int i = 1; i < DATASET_SIZE; i++)
{
double error = ((double)hC[i] - (double)i) / (double)i;
if (fabs(error) > TOL)
{
fprintf(stderr, "C[%12d] = %10.2lf, correct = %10.2lf\n", i, (double)hC[i], (double)i);
correct = false;
}
}
if (correct)
fprintf(stderr, "PASS.\n");
else
fprintf(stderr, "FAIL.\n");
// clean up:
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
CudaCheckError();
return 0;
}
|
6650471534467af99d6aaa443e11f38efca809f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
__global__ void add(int *a, int *b, int *c)
{
/* add the proper index so each block calculates a different value in the
array */
c[FIXME] = a[FIXME] + b[FIXME];
}
#define N 32
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of a, b, c */
checkCUDA( hipMalloc( (void **) &d_a, size ) );
/* insert code here for d_b and d_c */
FIXME
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
} /* end for */
/* copy inputs to device */
checkCUDA( hipMemcpy( d_a, a, size, hipMemcpyHostToDevice ) );
/* insert code to copy b to the device */
FIXME
/* zero out C array */
checkCUDA( hipMemset( d_c, 0, size ) );
/* launch the kernel on the GPU */
/* finish the kernel launch with N blocks and 1 thread per block */
hipLaunchKernelGGL(( add), dim3(FIXME), dim3(FIXME) , 0, 0, d_a, d_b, d_c );
checkKERNEL()
/* copy result back to host */
checkCUDA( hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost ) );
int success = 1;
for( int i = 0; i < N; i++ )
{
printf("c[%d] = %d\n",i,c[i]);
if( c[i] != a[i] + b[i] )
{
success = 0;
break;
} /* end if */
} /* end for */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* clean up */
free(a);
free(b);
free(c);
checkCUDA( hipFree( d_a ) );
checkCUDA( hipFree( d_b ) );
checkCUDA( hipFree( d_c ) );
return 0;
} /* end main */
| 6650471534467af99d6aaa443e11f38efca809f7.cu | /*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
__global__ void add(int *a, int *b, int *c)
{
/* add the proper index so each block calculates a different value in the
array */
c[FIXME] = a[FIXME] + b[FIXME];
}
#define N 32
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of a, b, c */
checkCUDA( cudaMalloc( (void **) &d_a, size ) );
/* insert code here for d_b and d_c */
FIXME
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
} /* end for */
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice ) );
/* insert code to copy b to the device */
FIXME
/* zero out C array */
checkCUDA( cudaMemset( d_c, 0, size ) );
/* launch the kernel on the GPU */
/* finish the kernel launch with N blocks and 1 thread per block */
add<<< FIXME, FIXME >>>( d_a, d_b, d_c );
checkKERNEL()
/* copy result back to host */
checkCUDA( cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost ) );
int success = 1;
for( int i = 0; i < N; i++ )
{
printf("c[%d] = %d\n",i,c[i]);
if( c[i] != a[i] + b[i] )
{
success = 0;
break;
} /* end if */
} /* end for */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* clean up */
free(a);
free(b);
free(c);
checkCUDA( cudaFree( d_a ) );
checkCUDA( cudaFree( d_b ) );
checkCUDA( cudaFree( d_c ) );
return 0;
} /* end main */
|
f3d655e531c732d12d1c84979da2ee2842105143.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_gpu.hpp"
#include <cudf/io/orc_types.hpp>
#include <cudf/table/table_device_view.cuh>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr int init_hash_bits = 12;
struct dictinit_state_s {
uint32_t nnz;
uint32_t total_dupes;
DictionaryChunk chunk;
volatile uint32_t scratch_red[32];
uint32_t* dict;
union {
uint16_t u16[1 << (init_hash_bits)];
uint32_t u32[1 << (init_hash_bits - 1)];
} map;
};
/**
* @brief Return a 12-bit hash from a string
*/
static inline __device__ uint32_t hash_string(const string_view val)
{
if (val.empty()) {
return 0;
} else {
char const* ptr = val.data();
uint32_t len = val.size_bytes();
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1);
}
}
/**
* @brief Fill dictionary with the indices of non-null rows
*
* @param[in,out] s dictionary builder state
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to scan non-null positions
*/
template <int block_size, typename Storage>
static __device__ void LoadNonNullIndices(volatile dictinit_state_s* s,
int t,
Storage& temp_storage)
{
if (t == 0) { s->nnz = 0; }
for (uint32_t i = 0; i < s->chunk.num_rows; i += block_size) {
const uint32_t* valid_map = s->chunk.leaf_column->null_mask();
auto column_offset = s->chunk.leaf_column->offset();
uint32_t is_valid, nz_pos;
if (t < block_size / 32) {
if (!valid_map) {
s->scratch_red[t] = 0xffff'ffffu;
} else {
uint32_t const row = s->chunk.start_row + i + t * 32;
auto const chunk_end = s->chunk.start_row + s->chunk.num_rows;
auto const valid_map_idx = (row + column_offset) / 32;
uint32_t valid = (row < chunk_end) ? valid_map[valid_map_idx] : 0;
auto const rows_in_next_word = (row + column_offset) & 0x1f;
if (rows_in_next_word != 0) {
auto const rows_in_current_word = 32 - rows_in_next_word;
// Read next word if any rows are within the chunk
uint32_t const valid_next =
(row + rows_in_current_word < chunk_end) ? valid_map[valid_map_idx + 1] : 0;
valid = __funnelshift_r(valid, valid_next, rows_in_next_word);
}
s->scratch_red[t] = valid;
}
}
__syncthreads();
is_valid = (i + t < s->chunk.num_rows) ? (s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0;
uint32_t tmp_nnz;
hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>(temp_storage)
.ExclusiveSum(is_valid, nz_pos, tmp_nnz);
nz_pos += s->nnz;
__syncthreads();
if (!t) { s->nnz += tmp_nnz; }
if (is_valid) { s->dict[nz_pos] = i + t; }
__syncthreads();
}
}
/**
* @brief Gather all non-NULL string rows and compute total character data size
*/
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size, 2)
gpuInitDictionaryIndices(device_2dspan<DictionaryChunk> chunks,
device_span<orc_column_device_view const> orc_columns,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
device_span<device_span<uint32_t>> tmp_indices,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<uint32_t const> str_col_indexes)
{
__shared__ __align__(16) dictinit_state_s state_g;
using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
using block_scan = hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
dictinit_state_s* const s = &state_g;
// Index of the column in the `str_col_indexes` array
uint32_t const str_col_idx = blockIdx.x;
// Index of the column in the `orc_columns` array
auto const col_idx = str_col_indexes[str_col_idx];
uint32_t group_id = blockIdx.y;
auto const num_str_cols = str_col_indexes.size();
uint32_t nnz, start_row, dict_char_count;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[group_id][str_col_idx];
s->chunk.leaf_column = &orc_columns[col_idx];
s->chunk.dict_data = dict_data[str_col_idx].data() + rowgroup_bounds[group_id][col_idx].begin;
s->chunk.dict_index = dict_index[str_col_idx].data();
s->chunk.start_row = rowgroup_bounds[group_id][col_idx].begin;
s->chunk.num_rows = rowgroup_bounds[group_id][col_idx].size();
s->dict = tmp_indices[str_col_idx].data() + s->chunk.start_row;
}
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) {
if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0;
}
__syncthreads();
// First, take care of NULLs, and count how many strings we have (TODO: bypass this step when
// there are no nulls)
LoadNonNullIndices<block_size>(s, t, temp_storage.scan_storage);
// Sum the lengths of all the strings
if (t == 0) {
s->chunk.string_char_count = 0;
s->total_dupes = 0;
}
nnz = s->nnz;
auto t_dict_data = s->chunk.dict_data;
start_row = s->chunk.start_row;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t len = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
len = static_cast<uint32_t>(string_val.size_bytes());
hash = hash_string(string_val);
}
len = block_reduce(temp_storage.reduce_storage).Sum(len);
if (t == 0) s->chunk.string_char_count += len;
if (i + t < nnz) {
atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 16 : 0));
t_dict_data[i + t] = start_row + ck_row;
}
__syncthreads();
}
// Reorder the 16-bit local indices according to the hash value of the strings
static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12");
{
// Cumulative sum of hash map counts
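    // Each 32-bit map entry packs the counts of two adjacent hash buckets (even bucket in the
    // low 16 bits, odd bucket in the high 16 bits); the exclusive prefix sum is computed in this
    // packed form so that map.u16[hash] ends up holding the start position of its bucket.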
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
sum23 += (sum01 >> 16) * 0x1'0001;
sum45 += (sum23 >> 16) * 0x1'0001;
sum67 += (sum45 >> 16) * 0x1'0001;
uint32_t sum_w = sum67 >> 16;
block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w);
__syncthreads();
sum_w = (sum_w - (sum67 >> 16)) * 0x1'0001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
__syncthreads();
}
// Put the indices back in hash order
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t pos = 0;
uint32_t pos_old = 0;
uint32_t sh = 0;
if (i + t < nnz) {
ck_row = t_dict_data[i + t] - start_row;
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
hash = hash_string(string_val);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic
// behavior for the first row in the hash map that will be used for early duplicate detection
__syncthreads();
if (i + t < nnz) {
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = ck_row;
}
__syncthreads();
bool collision = false;
uint32_t colliding_row = 0;
uint32_t pos_new = 0;
if (i + t < nnz) {
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision) { colliding_row = s->dict[pos_old]; }
}
__syncthreads();
if (collision) { atomicMin(s->dict + pos_old, ck_row); }
__syncthreads();
// Resolve collision
if (collision && ck_row == s->dict[pos_old]) { s->dict[pos] = colliding_row; }
}
__syncthreads();
// Now that the strings are ordered by hash, compare every string with the first entry in the hash
// map, the position of the first string can be inferred from the hash map counts
dict_char_count = 0;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_value = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
auto const string_length = static_cast<uint32_t>(string_value.size_bytes());
auto const hash = hash_string(string_value);
ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0];
if (ck_row_ref != ck_row) {
string_view reference_string =
s->chunk.leaf_column->element<string_view>(ck_row_ref + start_row);
is_dupe = (string_value == reference_string);
dict_char_count += (is_dupe) ? 0 : string_length;
}
}
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < nnz) {
if (!is_dupe) {
t_dict_data[i + t - dupes_before] = ck_row + start_row;
} else {
s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31);
}
}
}
// temp_storage is being used twice, so make sure there is `__syncthreads()` between them
// while making any future changes.
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (!t) {
chunks[group_id][str_col_idx].num_strings = nnz;
chunks[group_id][str_col_idx].string_char_count = s->chunk.string_char_count;
chunks[group_id][str_col_idx].num_dict_strings = nnz - s->total_dupes;
chunks[group_id][str_col_idx].dict_char_count = dict_char_count;
chunks[group_id][str_col_idx].leaf_column = s->chunk.leaf_column;
chunks[group_id][str_col_idx].dict_data = s->chunk.dict_data;
chunks[group_id][str_col_idx].dict_index = s->chunk.dict_index;
chunks[group_id][str_col_idx].start_row = s->chunk.start_row;
chunks[group_id][str_col_idx].num_rows = s->chunk.num_rows;
}
}
/**
* @brief In-place concatenate dictionary data for all chunks in each stripe
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*/
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactChunkDictionaries(device_2dspan<StripeDictionary> stripes,
device_2dspan<DictionaryChunk const> chunks)
{
__shared__ __align__(16) StripeDictionary stripe_g;
__shared__ __align__(16) DictionaryChunk chunk_g;
__shared__ const uint32_t* volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t chunk_len;
int t = threadIdx.x;
const uint32_t* src;
uint32_t* dst;
if (t == 0) stripe_g = stripes[stripe_id][col_id];
__syncthreads();
if (!stripe_g.dict_data) { return; }
if (t == 0) chunk_g = chunks[stripe_g.start_chunk][col_id];
__syncthreads();
dst = stripe_g.dict_data + chunk_g.num_dict_strings;
for (uint32_t g = 1; g < stripe_g.num_chunks; g++) {
if (!t) {
src = chunks[stripe_g.start_chunk + g][col_id].dict_data;
chunk_len = chunks[stripe_g.start_chunk + g][col_id].num_dict_strings;
ck_curptr_g = src;
ck_curlen_g = chunk_len;
}
__syncthreads();
src = ck_curptr_g;
chunk_len = ck_curlen_g;
if (src != dst) {
for (uint32_t i = 0; i < chunk_len; i += 1024) {
uint32_t idx = (i + t < chunk_len) ? src[i + t] : 0;
__syncthreads();
if (i + t < chunk_len) dst[i + t] = idx;
}
}
dst += chunk_len;
__syncthreads();
}
}
struct build_state_s {
uint32_t total_dupes;
StripeDictionary stripe;
volatile uint32_t scratch_red[32];
};
/**
* @brief Eliminate duplicates in-place and generate column dictionary index
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] num_columns Number of string columns
*/
// NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary
// blockDim {1024,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuBuildStripeDictionaries(device_2dspan<StripeDictionary> stripes)
{
__shared__ __align__(16) build_state_s state_g;
using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
using block_scan = hipcub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
build_state_s* const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t num_strings;
uint32_t *dict_data, *dict_index;
uint32_t dict_char_count;
int t = threadIdx.x;
if (t == 0) s->stripe = stripes[stripe_id][col_id];
if (t == 31 * 32) { s->total_dupes = 0; }
__syncthreads();
num_strings = s->stripe.num_strings;
dict_data = s->stripe.dict_data;
if (!dict_data) return;
dict_index = s->stripe.dict_index;
string_view current_string = string_view::min();
dict_char_count = 0;
for (uint32_t i = 0; i < num_strings; i += block_size) {
uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0;
uint32_t cur_len = 0;
bool is_dupe = false;
if (i + t < num_strings) {
current_string = s->stripe.leaf_column->element<string_view>(cur);
cur_len = current_string.size_bytes();
}
if (i + t != 0 && i + t < num_strings) {
uint32_t prev = dict_data[i + t - 1];
is_dupe = (current_string == (s->stripe.leaf_column->element<string_view>(prev)));
}
dict_char_count += (is_dupe) ? 0 : cur_len;
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < num_strings) {
dict_index[cur] = i + t - dupes_before;
if (!is_dupe && dupes_before != 0) { dict_data[i + t - dupes_before] = cur; }
}
__syncthreads();
}
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (t == 0) {
stripes[stripe_id][col_id].num_strings = num_strings - s->total_dupes;
stripes[stripe_id][col_id].dict_char_count = dict_char_count;
}
}
void InitDictionaryIndices(device_span<orc_column_device_view const> orc_columns,
device_2dspan<DictionaryChunk> chunks,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
device_span<device_span<uint32_t>> tmp_indices,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<uint32_t const> str_col_indexes,
rmm::cuda_stream_view stream)
{
static constexpr int block_size = 512;
dim3 dim_block(block_size, 1);
dim3 dim_grid(str_col_indexes.size(), rowgroup_bounds.size().first);
hipLaunchKernelGGL(( gpuInitDictionaryIndices<block_size>), dim3(dim_grid), dim3(dim_block), 0, stream.value(),
chunks, orc_columns, dict_data, dict_index, tmp_indices, rowgroup_bounds, str_col_indexes);
}
/**
* @copydoc cudf::io::orc::gpu::BuildStripeDictionaries
*/
void BuildStripeDictionaries(device_2dspan<StripeDictionary> d_stripes_dicts,
host_2dspan<StripeDictionary const> h_stripe_dicts,
device_2dspan<DictionaryChunk const> chunks,
rmm::cuda_stream_view stream)
{
auto const num_stripes = h_stripe_dicts.size().first;
auto const num_columns = h_stripe_dicts.size().second;
dim3 dim_block(1024, 1); // 1024 threads per chunk
dim3 dim_grid_build(num_columns, num_stripes);
hipLaunchKernelGGL(( gpuCompactChunkDictionaries), dim3(dim_grid_build), dim3(dim_block), 0, stream.value(), d_stripes_dicts,
chunks);
for (uint32_t stripe_idx = 0; stripe_idx < num_stripes; ++stripe_idx) {
for (auto const& stripe_dict : h_stripe_dicts[stripe_idx]) {
if (stripe_dict.dict_data != nullptr) {
auto const dict_data_ptr = thrust::device_pointer_cast(stripe_dict.dict_data);
auto const string_column = stripe_dict.leaf_column;
// NOTE: Requires the --expt-extended-lambda nvcc flag
thrust::sort(rmm::exec_policy(stream),
dict_data_ptr,
dict_data_ptr + stripe_dict.num_strings,
[string_column] __device__(const uint32_t& lhs, const uint32_t& rhs) {
return string_column->element<string_view>(lhs) <
string_column->element<string_view>(rhs);
});
}
}
}
hipLaunchKernelGGL(( gpuBuildStripeDictionaries<1024>)
, dim3(dim_grid_build), dim3(dim_block), 0, stream.value(), d_stripes_dicts);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
| f3d655e531c732d12d1c84979da2ee2842105143.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_gpu.hpp"
#include <cudf/io/orc_types.hpp>
#include <cudf/table/table_device_view.cuh>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
constexpr int init_hash_bits = 12;
struct dictinit_state_s {
uint32_t nnz;
uint32_t total_dupes;
DictionaryChunk chunk;
volatile uint32_t scratch_red[32];
uint32_t* dict;
union {
uint16_t u16[1 << (init_hash_bits)];
uint32_t u32[1 << (init_hash_bits - 1)];
} map;
};
/**
* @brief Return a 12-bit hash from a string
*/
static inline __device__ uint32_t hash_string(const string_view val)
{
if (val.empty()) {
return 0;
} else {
char const* ptr = val.data();
uint32_t len = val.size_bytes();
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1);
}
}
/**
* @brief Fill dictionary with the indices of non-null rows
*
* @param[in,out] s dictionary builder state
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to scan non-null positions
*/
template <int block_size, typename Storage>
static __device__ void LoadNonNullIndices(volatile dictinit_state_s* s,
int t,
Storage& temp_storage)
{
if (t == 0) { s->nnz = 0; }
for (uint32_t i = 0; i < s->chunk.num_rows; i += block_size) {
const uint32_t* valid_map = s->chunk.leaf_column->null_mask();
auto column_offset = s->chunk.leaf_column->offset();
uint32_t is_valid, nz_pos;
if (t < block_size / 32) {
if (!valid_map) {
s->scratch_red[t] = 0xffff'ffffu;
} else {
uint32_t const row = s->chunk.start_row + i + t * 32;
auto const chunk_end = s->chunk.start_row + s->chunk.num_rows;
auto const valid_map_idx = (row + column_offset) / 32;
uint32_t valid = (row < chunk_end) ? valid_map[valid_map_idx] : 0;
auto const rows_in_next_word = (row + column_offset) & 0x1f;
if (rows_in_next_word != 0) {
auto const rows_in_current_word = 32 - rows_in_next_word;
// Read next word if any rows are within the chunk
uint32_t const valid_next =
(row + rows_in_current_word < chunk_end) ? valid_map[valid_map_idx + 1] : 0;
valid = __funnelshift_r(valid, valid_next, rows_in_next_word);
}
s->scratch_red[t] = valid;
}
}
__syncthreads();
is_valid = (i + t < s->chunk.num_rows) ? (s->scratch_red[t >> 5] >> (t & 0x1f)) & 1 : 0;
uint32_t tmp_nnz;
cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>(temp_storage)
.ExclusiveSum(is_valid, nz_pos, tmp_nnz);
nz_pos += s->nnz;
__syncthreads();
if (!t) { s->nnz += tmp_nnz; }
if (is_valid) { s->dict[nz_pos] = i + t; }
__syncthreads();
}
}
/**
* @brief Gather all non-NULL string rows and compute total character data size
*/
// blockDim {block_size,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size, 2)
gpuInitDictionaryIndices(device_2dspan<DictionaryChunk> chunks,
device_span<orc_column_device_view const> orc_columns,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
device_span<device_span<uint32_t>> tmp_indices,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<uint32_t const> str_col_indexes)
{
__shared__ __align__(16) dictinit_state_s state_g;
using block_reduce = cub::BlockReduce<uint32_t, block_size>;
using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
dictinit_state_s* const s = &state_g;
// Index of the column in the `str_col_indexes` array
uint32_t const str_col_idx = blockIdx.x;
// Index of the column in the `orc_columns` array
auto const col_idx = str_col_indexes[str_col_idx];
uint32_t group_id = blockIdx.y;
auto const num_str_cols = str_col_indexes.size();
uint32_t nnz, start_row, dict_char_count;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[group_id][str_col_idx];
s->chunk.leaf_column = &orc_columns[col_idx];
s->chunk.dict_data = dict_data[str_col_idx].data() + rowgroup_bounds[group_id][col_idx].begin;
s->chunk.dict_index = dict_index[str_col_idx].data();
s->chunk.start_row = rowgroup_bounds[group_id][col_idx].begin;
s->chunk.num_rows = rowgroup_bounds[group_id][col_idx].size();
s->dict = tmp_indices[str_col_idx].data() + s->chunk.start_row;
}
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) {
if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0;
}
__syncthreads();
// First, take care of NULLs, and count how many strings we have (TODO: bypass this step when
// there are no nulls)
LoadNonNullIndices<block_size>(s, t, temp_storage.scan_storage);
// Sum the lengths of all the strings
if (t == 0) {
s->chunk.string_char_count = 0;
s->total_dupes = 0;
}
nnz = s->nnz;
auto t_dict_data = s->chunk.dict_data;
start_row = s->chunk.start_row;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t len = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
len = static_cast<uint32_t>(string_val.size_bytes());
hash = hash_string(string_val);
}
len = block_reduce(temp_storage.reduce_storage).Sum(len);
if (t == 0) s->chunk.string_char_count += len;
if (i + t < nnz) {
atomicAdd(&s->map.u32[hash >> 1], 1 << ((hash & 1) ? 16 : 0));
t_dict_data[i + t] = start_row + ck_row;
}
__syncthreads();
}
// Reorder the 16-bit local indices according to the hash value of the strings
static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12");
{
// Cumulative sum of hash map counts
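    // Each 32-bit map entry packs the counts of two adjacent hash buckets (even bucket in the
    // low 16 bits, odd bucket in the high 16 bits); the exclusive prefix sum is computed in this
    // packed form so that map.u16[hash] ends up holding the start position of its bucket.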
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
sum23 += (sum01 >> 16) * 0x1'0001;
sum45 += (sum23 >> 16) * 0x1'0001;
sum67 += (sum45 >> 16) * 0x1'0001;
uint32_t sum_w = sum67 >> 16;
block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w);
__syncthreads();
sum_w = (sum_w - (sum67 >> 16)) * 0x1'0001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
__syncthreads();
}
// Put the indices back in hash order
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0;
uint32_t hash = 0;
uint32_t pos = 0;
uint32_t pos_old = 0;
uint32_t sh = 0;
if (i + t < nnz) {
ck_row = t_dict_data[i + t] - start_row;
string_view string_val = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
hash = hash_string(string_val);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic
// behavior for the first row in the hash map that will be used for early duplicate detection
__syncthreads();
if (i + t < nnz) {
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = ck_row;
}
__syncthreads();
bool collision = false;
uint32_t colliding_row = 0;
uint32_t pos_new = 0;
if (i + t < nnz) {
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision) { colliding_row = s->dict[pos_old]; }
}
__syncthreads();
if (collision) { atomicMin(s->dict + pos_old, ck_row); }
__syncthreads();
// Resolve collision
if (collision && ck_row == s->dict[pos_old]) { s->dict[pos] = colliding_row; }
}
__syncthreads();
// Now that the strings are ordered by hash, compare every string with the first entry in the hash
// map, the position of the first string can be inferred from the hash map counts
dict_char_count = 0;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0;
if (i + t < nnz) {
ck_row = s->dict[i + t];
string_view string_value = s->chunk.leaf_column->element<string_view>(ck_row + start_row);
auto const string_length = static_cast<uint32_t>(string_value.size_bytes());
auto const hash = hash_string(string_value);
ck_row_ref = s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0];
if (ck_row_ref != ck_row) {
string_view reference_string =
s->chunk.leaf_column->element<string_view>(ck_row_ref + start_row);
is_dupe = (string_value == reference_string);
dict_char_count += (is_dupe) ? 0 : string_length;
}
}
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < nnz) {
if (!is_dupe) {
t_dict_data[i + t - dupes_before] = ck_row + start_row;
} else {
s->chunk.dict_index[ck_row + start_row] = (ck_row_ref + start_row) | (1u << 31);
}
}
}
// temp_storage is being used twice, so make sure there is `__syncthreads()` between them
// while making any future changes.
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (!t) {
chunks[group_id][str_col_idx].num_strings = nnz;
chunks[group_id][str_col_idx].string_char_count = s->chunk.string_char_count;
chunks[group_id][str_col_idx].num_dict_strings = nnz - s->total_dupes;
chunks[group_id][str_col_idx].dict_char_count = dict_char_count;
chunks[group_id][str_col_idx].leaf_column = s->chunk.leaf_column;
chunks[group_id][str_col_idx].dict_data = s->chunk.dict_data;
chunks[group_id][str_col_idx].dict_index = s->chunk.dict_index;
chunks[group_id][str_col_idx].start_row = s->chunk.start_row;
chunks[group_id][str_col_idx].num_rows = s->chunk.num_rows;
}
}
/**
* @brief In-place concatenate dictionary data for all chunks in each stripe
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] chunks DictionaryChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*/
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactChunkDictionaries(device_2dspan<StripeDictionary> stripes,
device_2dspan<DictionaryChunk const> chunks)
{
__shared__ __align__(16) StripeDictionary stripe_g;
__shared__ __align__(16) DictionaryChunk chunk_g;
__shared__ const uint32_t* volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t chunk_len;
int t = threadIdx.x;
const uint32_t* src;
uint32_t* dst;
if (t == 0) stripe_g = stripes[stripe_id][col_id];
__syncthreads();
if (!stripe_g.dict_data) { return; }
if (t == 0) chunk_g = chunks[stripe_g.start_chunk][col_id];
__syncthreads();
dst = stripe_g.dict_data + chunk_g.num_dict_strings;
for (uint32_t g = 1; g < stripe_g.num_chunks; g++) {
if (!t) {
src = chunks[stripe_g.start_chunk + g][col_id].dict_data;
chunk_len = chunks[stripe_g.start_chunk + g][col_id].num_dict_strings;
ck_curptr_g = src;
ck_curlen_g = chunk_len;
}
__syncthreads();
src = ck_curptr_g;
chunk_len = ck_curlen_g;
if (src != dst) {
for (uint32_t i = 0; i < chunk_len; i += 1024) {
uint32_t idx = (i + t < chunk_len) ? src[i + t] : 0;
__syncthreads();
if (i + t < chunk_len) dst[i + t] = idx;
}
}
dst += chunk_len;
__syncthreads();
}
}
struct build_state_s {
uint32_t total_dupes;
StripeDictionary stripe;
volatile uint32_t scratch_red[32];
};
/**
* @brief Eliminate duplicates in-place and generate column dictionary index
*
* @param[in] stripes StripeDictionary device array [stripe][column]
* @param[in] num_columns Number of string columns
*/
// NOTE: Prone to poor utilization on small datasets due to 1 block per dictionary
// blockDim {1024,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuBuildStripeDictionaries(device_2dspan<StripeDictionary> stripes)
{
__shared__ __align__(16) build_state_s state_g;
using block_reduce = cub::BlockReduce<uint32_t, block_size>;
using block_scan = cub::BlockScan<uint32_t, block_size, cub::BLOCK_SCAN_WARP_SCANS>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
build_state_s* const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t stripe_id = blockIdx.y;
uint32_t num_strings;
uint32_t *dict_data, *dict_index;
uint32_t dict_char_count;
int t = threadIdx.x;
if (t == 0) s->stripe = stripes[stripe_id][col_id];
if (t == 31 * 32) { s->total_dupes = 0; }
__syncthreads();
num_strings = s->stripe.num_strings;
dict_data = s->stripe.dict_data;
if (!dict_data) return;
dict_index = s->stripe.dict_index;
string_view current_string = string_view::min();
dict_char_count = 0;
for (uint32_t i = 0; i < num_strings; i += block_size) {
uint32_t cur = (i + t < num_strings) ? dict_data[i + t] : 0;
uint32_t cur_len = 0;
bool is_dupe = false;
if (i + t < num_strings) {
current_string = s->stripe.leaf_column->element<string_view>(cur);
cur_len = current_string.size_bytes();
}
if (i + t != 0 && i + t < num_strings) {
uint32_t prev = dict_data[i + t - 1];
is_dupe = (current_string == (s->stripe.leaf_column->element<string_view>(prev)));
}
dict_char_count += (is_dupe) ? 0 : cur_len;
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (!t) { s->total_dupes += dupes_in_block; }
if (i + t < num_strings) {
dict_index[cur] = i + t - dupes_before;
if (!is_dupe && dupes_before != 0) { dict_data[i + t - dupes_before] = cur; }
}
__syncthreads();
}
dict_char_count = block_reduce(temp_storage.reduce_storage).Sum(dict_char_count);
if (t == 0) {
stripes[stripe_id][col_id].num_strings = num_strings - s->total_dupes;
stripes[stripe_id][col_id].dict_char_count = dict_char_count;
}
}
void InitDictionaryIndices(device_span<orc_column_device_view const> orc_columns,
device_2dspan<DictionaryChunk> chunks,
device_span<device_span<uint32_t>> dict_data,
device_span<device_span<uint32_t>> dict_index,
device_span<device_span<uint32_t>> tmp_indices,
device_2dspan<rowgroup_rows const> rowgroup_bounds,
device_span<uint32_t const> str_col_indexes,
rmm::cuda_stream_view stream)
{
static constexpr int block_size = 512;
dim3 dim_block(block_size, 1);
dim3 dim_grid(str_col_indexes.size(), rowgroup_bounds.size().first);
gpuInitDictionaryIndices<block_size><<<dim_grid, dim_block, 0, stream.value()>>>(
chunks, orc_columns, dict_data, dict_index, tmp_indices, rowgroup_bounds, str_col_indexes);
}
/**
* @copydoc cudf::io::orc::gpu::BuildStripeDictionaries
*/
void BuildStripeDictionaries(device_2dspan<StripeDictionary> d_stripes_dicts,
host_2dspan<StripeDictionary const> h_stripe_dicts,
device_2dspan<DictionaryChunk const> chunks,
rmm::cuda_stream_view stream)
{
auto const num_stripes = h_stripe_dicts.size().first;
auto const num_columns = h_stripe_dicts.size().second;
dim3 dim_block(1024, 1); // 1024 threads per chunk
dim3 dim_grid_build(num_columns, num_stripes);
gpuCompactChunkDictionaries<<<dim_grid_build, dim_block, 0, stream.value()>>>(d_stripes_dicts,
chunks);
for (uint32_t stripe_idx = 0; stripe_idx < num_stripes; ++stripe_idx) {
for (auto const& stripe_dict : h_stripe_dicts[stripe_idx]) {
if (stripe_dict.dict_data != nullptr) {
auto const dict_data_ptr = thrust::device_pointer_cast(stripe_dict.dict_data);
auto const string_column = stripe_dict.leaf_column;
// NOTE: Requires the --expt-extended-lambda nvcc flag
thrust::sort(rmm::exec_policy(stream),
dict_data_ptr,
dict_data_ptr + stripe_dict.num_strings,
[string_column] __device__(const uint32_t& lhs, const uint32_t& rhs) {
return string_column->element<string_view>(lhs) <
string_column->element<string_view>(rhs);
});
}
}
}
gpuBuildStripeDictionaries<1024>
<<<dim_grid_build, dim_block, 0, stream.value()>>>(d_stripes_dicts);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
0f0d62e4ca3ccdd2ef02941f101e55e91a66a305.hip | // !!! This is a file automatically generated by hipify!!!
int arraySize = 1024*1024;
int blockSize, minGridSize, gridSize, maxActiveBlocks;
hipOccupancyMaxPotentialBlockSize(
&minGridSize, &blockSize, MyKernel, 0, 0);
gridSize = (arraySize + blockSize - 1) / blockSize;
// MyKernel<<<gridSize,blockSize>>>(args);
hipOccupancyMaxActiveBlocksPerMultiprocessor(
&maxActiveBlocks, MyKernel, blockSize, 0);
int dev; hipDeviceProp_t props;
hipGetDevice(&dev);
hipGetDeviceProperties(&props, dev);
// calculate theoretical occupancy
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize);
| 0f0d62e4ca3ccdd2ef02941f101e55e91a66a305.cu | int arraySize = 1024*1024;
int blockSize, minGridSize, gridSize, maxActiveBlocks;
cudaOccupancyMaxPotentialBlockSize(
&minGridSize, &blockSize, MyKernel, 0, 0);
gridSize = (arraySize + blockSize - 1) / blockSize;
// MyKernel<<<gridSize,blockSize>>>(args);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&maxActiveBlocks, MyKernel, blockSize, 0);
int dev; cudaDeviceProp props;
cudaGetDevice(&dev);
cudaGetDeviceProperties(&props, dev);
// calculate theoretical occupancy
float occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize);
|
a385cbf188c08128918f9503e1f14e3efb0389d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/convolution_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/scatter.cu.h"
#include "paddle/phi/kernels/funcs/sparse/scatter.cu.h"
#include "paddle/phi/kernels/sparse/gpu/convolution.cu.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT>
void Conv3dGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
SparseCooTensor* out,
DenseTensor* rulebook) {
// update padding and dilation
  // Currently, only x.layout == NDHWC and groups == 1 are supported
// if x.layout != NDHWC then transpose(x), transpose(weight)
const auto& x_dims = x.dims();
const auto& kernel_dims = kernel.dims();
int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
DDim out_dims = {1, 1, 1, 1, 1};
std::vector<int> kernel_sizes(kernel_dims.size());
for (int i = 0; i < kernel_dims.size(); i++) {
kernel_sizes[i] = kernel_dims[i];
}
std::vector<int> subm_paddings(paddings), subm_strides(strides);
if (subm) {
// the out shape of subm_conv is same as input shape
// reset the padding=kernel_size/2 and strides=1
phi::funcs::sparse::ResetSubmKernelSizeAndStrides(
kernel.dims(), &subm_paddings, &subm_strides);
}
phi::funcs::sparse::GetOutShape(
x_dims, kernel_sizes, subm_paddings, dilations, subm_strides, &out_dims);
const int in_channels = kernel_dims[3];
const int out_channels = kernel_dims[4];
std::vector<int> offsets(kernel_size + 1), h_counter(kernel_size);
// Second algorithm:
// https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf
// 1. product rulebook
DenseTensorMeta counter_meta(
DataType::INT32, {kernel_size}, DataLayout::NCHW);
DenseTensorMeta offsets_meta(
DataType::INT32, {kernel_size}, DataLayout::NCHW);
DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta));
DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(offsets_meta));
DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW);
DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta));
DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta));
int n = ProductRuleBook<T, GPUContext, IntT>(dev_ctx,
x,
kernel_sizes,
subm_paddings,
dilations,
subm_strides,
out_dims,
subm,
rulebook,
&counter_per_kernel,
&offsets_per_kernel,
&out_index,
&unique_value,
out,
&h_counter,
&offsets);
const int* counter_ptr = counter_per_kernel.data<int>();
const int* offsets_ptr = counter_per_kernel.data<int>();
const IntT* rulebook_ptr = rulebook->data<IntT>();
// 2. gather
DenseTensorMeta in_features_meta(
x.dtype(), {n, in_channels}, DataLayout::NCHW);
DenseTensorMeta out_features_meta(
x.dtype(), {n, out_channels}, DataLayout::NCHW);
phi::DenseTensor in_features =
phi::Empty(dev_ctx, std::move(in_features_meta));
phi::DenseTensor out_features =
phi::Empty(dev_ctx, std::move(out_features_meta));
T* in_features_ptr = in_features.data<T>();
T* out_features_ptr = out_features.data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, &out_features, static_cast<T>(0.0f));
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * in_channels, 1);
hipLaunchKernelGGL(( GatherKernel<T, IntT>), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), x.non_zero_elements().data<T>(),
rulebook_ptr + n,
in_features_ptr,
n,
in_channels);
  // 3. call gemm for every weight
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
auto* out_values = out->mutable_non_zero_elements();
T* out_values_ptr = out_values->data<T>();
const T* kernel_ptr = kernel.data<T>();
for (int i = 0; i < kernel_size; i++) {
if (h_counter[i] <= 0) {
continue;
}
// call gemm: (n, in_channels) * (in_channels, out_channels)
const int M = h_counter[i];
const int K = in_channels;
const int N = out_channels;
T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels;
const T* tmp_kernel_ptr = kernel_ptr + i * K * N;
T* tmp_out_ptr = out_features_ptr + offsets[i] * out_channels;
blas.GEMM(CblasNoTrans,
CblasNoTrans,
M,
N,
K,
static_cast<T>(1),
tmp_in_ptr,
tmp_kernel_ptr,
static_cast<T>(0),
tmp_out_ptr);
}
// 4. scatter
if (subm) {
set_zero(dev_ctx, out_values, static_cast<T>(0.0f));
config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * out_channels, 1);
hipLaunchKernelGGL(( phi::funcs::ScatterCUDAKernel<T, IntT>)
, dim3(config.block_per_grid),
dim3(config.thread_per_block),
0,
dev_ctx.stream(), out_features_ptr,
rulebook_ptr + 2 * n,
out_values_ptr,
n,
out_channels,
false);
} else {
config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, out->nnz() * out_channels, 1);
hipLaunchKernelGGL(( phi::funcs::sparse::ScatterKernel<T>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), out_features_ptr,
unique_value.data<int>(),
out_index.data<int>(),
out->nnz(),
n,
out_channels,
out_values_ptr);
}
}
/**
* x: (N, D, H, W, C)
* kernel: (D, H, W, C, OC)
* out: (N, D, H, W, OC)
**/
template <typename T, typename Context>
void Conv3dKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
SparseCooTensor* out,
DenseTensor* rulebook) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "Conv3dGPUKernel", ([&] {
Conv3dGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
paddings,
dilations,
strides,
groups,
subm,
out,
rulebook);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(sparse_conv3d,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
| a385cbf188c08128918f9503e1f14e3efb0389d4.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/convolution_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/scatter.cu.h"
#include "paddle/phi/kernels/funcs/sparse/scatter.cu.h"
#include "paddle/phi/kernels/sparse/gpu/convolution.cu.h"
namespace phi {
namespace sparse {
template <typename T, typename IntT>
void Conv3dGPUKernel(const GPUContext& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
SparseCooTensor* out,
DenseTensor* rulebook) {
// update padding and dilation
// Currently, only support x.layout is NDHWC, groups = 1
// if x.layout != NDHWC then transpose(x), transpose(weight)
const auto& x_dims = x.dims();
const auto& kernel_dims = kernel.dims();
int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2];
DDim out_dims = {1, 1, 1, 1, 1};
std::vector<int> kernel_sizes(kernel_dims.size());
for (int i = 0; i < kernel_dims.size(); i++) {
kernel_sizes[i] = kernel_dims[i];
}
std::vector<int> subm_paddings(paddings), subm_strides(strides);
if (subm) {
// the out shape of subm_conv is same as input shape
// reset the padding=kernel_size/2 and strides=1
phi::funcs::sparse::ResetSubmKernelSizeAndStrides(
kernel.dims(), &subm_paddings, &subm_strides);
}
phi::funcs::sparse::GetOutShape(
x_dims, kernel_sizes, subm_paddings, dilations, subm_strides, &out_dims);
const int in_channels = kernel_dims[3];
const int out_channels = kernel_dims[4];
std::vector<int> offsets(kernel_size + 1), h_counter(kernel_size);
// Second algorithm:
// https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf
// 1. product rulebook
DenseTensorMeta counter_meta(
DataType::INT32, {kernel_size}, DataLayout::NCHW);
DenseTensorMeta offsets_meta(
DataType::INT32, {kernel_size}, DataLayout::NCHW);
DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta));
DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(offsets_meta));
DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW);
DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta));
DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta));
int n = ProductRuleBook<T, GPUContext, IntT>(dev_ctx,
x,
kernel_sizes,
subm_paddings,
dilations,
subm_strides,
out_dims,
subm,
rulebook,
&counter_per_kernel,
&offsets_per_kernel,
&out_index,
&unique_value,
out,
&h_counter,
&offsets);
const int* counter_ptr = counter_per_kernel.data<int>();
  const int* offsets_ptr = offsets_per_kernel.data<int>();
const IntT* rulebook_ptr = rulebook->data<IntT>();
// 2. gather
DenseTensorMeta in_features_meta(
x.dtype(), {n, in_channels}, DataLayout::NCHW);
DenseTensorMeta out_features_meta(
x.dtype(), {n, out_channels}, DataLayout::NCHW);
phi::DenseTensor in_features =
phi::Empty(dev_ctx, std::move(in_features_meta));
phi::DenseTensor out_features =
phi::Empty(dev_ctx, std::move(out_features_meta));
T* in_features_ptr = in_features.data<T>();
T* out_features_ptr = out_features.data<T>();
phi::funcs::SetConstant<GPUContext, T> set_zero;
set_zero(dev_ctx, &out_features, static_cast<T>(0.0f));
auto config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * in_channels, 1);
GatherKernel<T, IntT><<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(x.non_zero_elements().data<T>(),
rulebook_ptr + n,
in_features_ptr,
n,
in_channels);
  // 3. call gemm for every weight (one GEMM per kernel offset)
auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx);
auto* out_values = out->mutable_non_zero_elements();
T* out_values_ptr = out_values->data<T>();
const T* kernel_ptr = kernel.data<T>();
for (int i = 0; i < kernel_size; i++) {
if (h_counter[i] <= 0) {
continue;
}
// call gemm: (n, in_channels) * (in_channels, out_channels)
const int M = h_counter[i];
const int K = in_channels;
const int N = out_channels;
T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels;
const T* tmp_kernel_ptr = kernel_ptr + i * K * N;
T* tmp_out_ptr = out_features_ptr + offsets[i] * out_channels;
blas.GEMM(CblasNoTrans,
CblasNoTrans,
M,
N,
K,
static_cast<T>(1),
tmp_in_ptr,
tmp_kernel_ptr,
static_cast<T>(0),
tmp_out_ptr);
}
// 4. scatter
if (subm) {
set_zero(dev_ctx, out_values, static_cast<T>(0.0f));
config =
phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * out_channels, 1);
phi::funcs::ScatterCUDAKernel<T, IntT>
<<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(out_features_ptr,
rulebook_ptr + 2 * n,
out_values_ptr,
n,
out_channels,
false);
} else {
config = phi::backends::gpu::GetGpuLaunchConfig1D(
dev_ctx, out->nnz() * out_channels, 1);
phi::funcs::sparse::ScatterKernel<T>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(out_features_ptr,
unique_value.data<int>(),
out_index.data<int>(),
out->nnz(),
n,
out_channels,
out_values_ptr);
}
}
/**
* x: (N, D, H, W, C)
* kernel: (D, H, W, C, OC)
* out: (N, D, H, W, OC)
**/
template <typename T, typename Context>
void Conv3dKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& kernel,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const int groups,
const bool subm,
SparseCooTensor* out,
DenseTensor* rulebook) {
PD_VISIT_INTEGRAL_TYPES(
x.non_zero_indices().dtype(), "Conv3dGPUKernel", ([&] {
Conv3dGPUKernel<T, data_t>(dev_ctx,
x,
kernel,
paddings,
dilations,
strides,
groups,
subm,
out,
rulebook);
}));
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(sparse_conv3d,
GPU,
ALL_LAYOUT,
phi::sparse::Conv3dKernel,
float,
double,
phi::dtype::float16) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
|
3b7fae174488c29a794a987503ed83c758709a14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define BDIMX 32
#define BDIMY 32
#define PAD 1
// dynamic shared memory with Padding.
// This would avoid bank conflicts while writing data in shared memory, since all column writing would spread across banks diagonally.
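// With PAD=1 the row stride becomes blockDim.x + PAD = 33 ints, so a column write by
// lane x touches word x*33 + y, i.e. bank (x + y) % 32 with 4-byte banks -- every lane
// of a warp lands in a different bank (the stagger also breaks up the conflicts seen
// in 8-byte bank mode).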
__global__ void smem_set_col_read_row_dynamic_pad(int* out)
{
extern __shared__ int tile[];
int rowIndex = threadIdx.y * (blockDim.x + PAD) + threadIdx.x;
int colIndex = threadIdx.x * (blockDim.x + PAD) + threadIdx.y;
tile[colIndex] = colIndex;
__syncthreads();
out[rowIndex] = tile[rowIndex];
}
__global__ void smem_set_col_read_row_dynamic(int* out)
{
extern __shared__ int tile[];
int rowIndex = threadIdx.y * blockDim.x + threadIdx.x;
int colIndex = threadIdx.x * blockDim.x + threadIdx.y;
tile[colIndex] = colIndex;
__syncthreads();
out[rowIndex] = tile[rowIndex];
}
// Set values in shared memory in row major order, so that there will not be any bank conflicts.
// access mode 64bit
//------------------------------------------------------------------------
//| B1 | B2 | B3 | ....................................| B31 |
//| 0 | 32| 1 | 33 | 2 | 34 |.....................................| 31 | 63 |
//------------------------------------------------------------------------
// Read shared memory in column major, so there will be bank conflicts.
__global__ void smem_set_row_read_col(int* out)
{
int idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ int tile[BDIMX][BDIMY];
tile[threadIdx.y][threadIdx.x] = idx;
// wait till all threads in thread block finished setting value in shared memory
__syncthreads();
out[idx] = tile[threadIdx.x][threadIdx.y];
}
// Set values in shared memory in column major order, so there will be bank conflicts.
// Read from shared memory in row major order, so there will be no bank conflicts. It will serve data in one transaction.
__global__ void smem_set_col_read_row(int* out)
{
int idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ int tile[BDIMX][BDIMY];
tile[threadIdx.x][threadIdx.y] = idx;
// wait till all threads in thread block finished setting value in shared memory
__syncthreads();
out[idx] = tile[threadIdx.y][threadIdx.x];
}
// Read and write in shared memory in row major, so no conflicts.
__global__ void smem_set_row_read_row(int* out)
{
int idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ int tile[BDIMX][BDIMY];
tile[threadIdx.y][threadIdx.x] = idx;
// wait till all threads in thread block finished setting value in shared memory
__syncthreads();
out[idx] = tile[threadIdx.y][threadIdx.x];
}
int main(int argc, char** argv)
{
int smemconfig = 0;
// set access mode to 32 bit or 64 bit.
if (smemconfig == 0)
{
hipDeviceSetSharedMemConfig(hipSharedMemConfig::hipSharedMemBankSizeEightByte);
}
else
{
hipDeviceSetSharedMemConfig(hipSharedMemConfig::hipSharedMemBankSizeFourByte);
}
int* d_data;
hipMalloc((int**)&d_data, BDIMX * BDIMY * sizeof(int));
dim3 block(BDIMX, BDIMY);
dim3 grid(1, 1);
smem_set_row_read_col << <grid, block >> > (d_data);
hipDeviceSynchronize();
smem_set_col_read_row << <grid, block >> > (d_data);
hipDeviceSynchronize();
smem_set_row_read_row << <grid, block >> > (d_data);
hipDeviceSynchronize();
smem_set_col_read_row_dynamic << <grid, block, sizeof(int) * BDIMX * BDIMY >> > (d_data);
hipDeviceSynchronize();
smem_set_col_read_row_dynamic_pad << <grid, block, sizeof(int)* (BDIMX + PAD) * BDIMY >> > (d_data);
hipDeviceSynchronize();
hipFree(d_data);
hipDeviceReset();
return 0;
}
| 3b7fae174488c29a794a987503ed83c758709a14.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define BDIMX 32
#define BDIMY 32
#define PAD 1
// dynamic shared memory with Padding.
// This would avoid bank conflicts while writing data in shared memory, since all column writing would spread across banks diagonally.
__global__ void smem_set_col_read_row_dynamic_pad(int* out)
{
extern __shared__ int tile[];
int rowIndex = threadIdx.y * (blockDim.x + PAD) + threadIdx.x;
int colIndex = threadIdx.x * (blockDim.x + PAD) + threadIdx.y;
tile[colIndex] = colIndex;
__syncthreads();
out[rowIndex] = tile[rowIndex];
}
__global__ void smem_set_col_read_row_dynamic(int* out)
{
extern __shared__ int tile[];
int rowIndex = threadIdx.y * blockDim.x + threadIdx.x;
int colIndex = threadIdx.x * blockDim.x + threadIdx.y;
tile[colIndex] = colIndex;
__syncthreads();
out[rowIndex] = tile[rowIndex];
}
// Set values in shared memory in row major order, so that there will not be any bank conflicts.
// access mode 64bit
//------------------------------------------------------------------------
//| B1 | B2 | B3 | ....................................| B31 |
//| 0 | 32| 1 | 33 | 2 | 34 |.....................................| 31 | 63 |
//------------------------------------------------------------------------
// Read shared memory in column major, so there will be bank conflicts.
__global__ void smem_set_row_read_col(int* out)
{
int idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ int tile[BDIMX][BDIMY];
tile[threadIdx.y][threadIdx.x] = idx;
// wait till all threads in thread block finished setting value in shared memory
__syncthreads();
out[idx] = tile[threadIdx.x][threadIdx.y];
}
// Set values in shared memory in column major order, so there will be bank conflicts.
// Read from shared memory in row major order, so therer will be no bank conflicts. It will serve data in one transaction.
__global__ void smem_set_col_read_row(int* out)
{
int idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ int tile[BDIMX][BDIMY];
tile[threadIdx.x][threadIdx.y] = idx;
// wait till all threads in thread block finished setting value in shared memory
__syncthreads();
out[idx] = tile[threadIdx.y][threadIdx.x];
}
// Read and write in shared memory in row major, so no conflicts.
__global__ void smem_set_row_read_row(int* out)
{
int idx = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ int tile[BDIMX][BDIMY];
tile[threadIdx.y][threadIdx.x] = idx;
// wait till all threads in thread block finished setting value in shared memory
__syncthreads();
out[idx] = tile[threadIdx.y][threadIdx.x];
}
int main(int argc, char** argv)
{
int smemconfig = 0;
// set access mode to 32 bit or 64 bit.
if (smemconfig == 0)
{
cudaDeviceSetSharedMemConfig(cudaSharedMemConfig::cudaSharedMemBankSizeEightByte);
}
else
{
cudaDeviceSetSharedMemConfig(cudaSharedMemConfig::cudaSharedMemBankSizeFourByte);
}
int* d_data;
cudaMalloc((int**)&d_data, BDIMX * BDIMY * sizeof(int));
dim3 block(BDIMX, BDIMY);
dim3 grid(1, 1);
smem_set_row_read_col << <grid, block >> > (d_data);
cudaDeviceSynchronize();
smem_set_col_read_row << <grid, block >> > (d_data);
cudaDeviceSynchronize();
smem_set_row_read_row << <grid, block >> > (d_data);
cudaDeviceSynchronize();
smem_set_col_read_row_dynamic << <grid, block, sizeof(int) * BDIMX * BDIMY >> > (d_data);
cudaDeviceSynchronize();
smem_set_col_read_row_dynamic_pad << <grid, block, sizeof(int)* (BDIMX + PAD) * BDIMY >> > (d_data);
cudaDeviceSynchronize();
cudaFree(d_data);
cudaDeviceReset();
return 0;
}
|
14e0050a44c561eb01ab82314b717e8efc344b01.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#define N 4
#define M 8
__global__ void composition(int *a, int *b, int *c)
{
int aEnd = M * N * blockIdx.y + M - 1;
int sum = 0;
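	// Tiled multiply (M is assumed to be a multiple of N): aBegin walks right along
	// block-row blockIdx.y of A in N-column steps while bBegin walks down block-column
	// blockIdx.x of B in N-row steps; each N x N tile pair is staged in shared memory
	// before the partial dot products are accumulated into sum.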
for (int aBegin = M * N * blockIdx.y, bBegin = N * blockIdx.x; aBegin <= aEnd; aBegin += N, bBegin += N * M) {
__shared__ int as[N*N];
__shared__ int bs[N*N];
as[N*threadIdx.y+threadIdx.x] = a[aBegin + M * threadIdx.y + threadIdx.x];
bs[N*threadIdx.y+threadIdx.x] = b[bBegin + M * threadIdx.y + threadIdx.x];
__syncthreads();
for (int k = 0; k < N; k++)
sum += as[threadIdx.y*N+k] * bs[k*N+threadIdx.x];
__syncthreads();
}
c[M * N * blockIdx.y + N * blockIdx.x + M * threadIdx.y + threadIdx.x] = sum;
}
__host__ int main(void)
{
int a[M*M], b[M*M], c[M*M];
for (int i = 0; i < M*M; i++)
{
a[i] = i;
b[i] = i * i;
}
for (int i = 0; i < M; i++)
{
for (int j = 0; j < M; j++) {
printf("%d ", a[i*M + j]);
}
printf("\n");
}
printf("-------------------------------- \n");
for (int i = 0; i < M; i++)
{
for (int j = 0; j < M; j++) {
printf("%d ", b[i*M + j]);
}
printf("\n");
}
printf("-------------------------------- \n");
int* devA;
int* devB;
int* devC;
hipMalloc((void**)&devA, sizeof(int) * M*M);
hipMalloc((void**)&devB, sizeof(int) * M*M);
hipMalloc((void**)&devC, sizeof(int) * M*M);
hipMemcpy(devA, a, sizeof(int) * M*M, hipMemcpyHostToDevice);
hipMemcpy(devB, b, sizeof(int) * M*M, hipMemcpyHostToDevice);
dim3 blocks(M / N, M / N);
dim3 threads(N, N);
composition << <blocks, threads >> > (devA, devB, devC);
hipEvent_t syncEvent;
hipEventCreate(&syncEvent);
hipEventRecord(syncEvent, 0);
hipEventSynchronize(syncEvent);
hipMemcpy(c, devC, sizeof(int) * M*M, hipMemcpyDeviceToHost);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < M; j++) {
printf("%d ", c[i*M + j]);
}
printf("\n");
}
hipEventDestroy(syncEvent);
hipFree(devA);
hipFree(devB);
hipFree(devC);
std::system("pause");
return 0;
} | 14e0050a44c561eb01ab82314b717e8efc344b01.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#define N 4
#define M 8
__global__ void composition(int *a, int *b, int *c)
{
int aEnd = M * N * blockIdx.y + M - 1;
int sum = 0;
for (int aBegin = M * N * blockIdx.y, bBegin = N * blockIdx.x; aBegin <= aEnd; aBegin += N, bBegin += N * M) {
__shared__ int as[N*N];
__shared__ int bs[N*N];
as[N*threadIdx.y+threadIdx.x] = a[aBegin + M * threadIdx.y + threadIdx.x];
bs[N*threadIdx.y+threadIdx.x] = b[bBegin + M * threadIdx.y + threadIdx.x];
__syncthreads();
for (int k = 0; k < N; k++)
sum += as[threadIdx.y*N+k] * bs[k*N+threadIdx.x];
__syncthreads();
}
c[M * N * blockIdx.y + N * blockIdx.x + M * threadIdx.y + threadIdx.x] = sum;
}
__host__ int main(void)
{
int a[M*M], b[M*M], c[M*M];
for (int i = 0; i < M*M; i++)
{
a[i] = i;
b[i] = i * i;
}
for (int i = 0; i < M; i++)
{
for (int j = 0; j < M; j++) {
printf("%d ", a[i*M + j]);
}
printf("\n");
}
printf("-------------------------------- \n");
for (int i = 0; i < M; i++)
{
for (int j = 0; j < M; j++) {
printf("%d ", b[i*M + j]);
}
printf("\n");
}
printf("-------------------------------- \n");
int* devA;
int* devB;
int* devC;
cudaMalloc((void**)&devA, sizeof(int) * M*M);
cudaMalloc((void**)&devB, sizeof(int) * M*M);
cudaMalloc((void**)&devC, sizeof(int) * M*M);
cudaMemcpy(devA, a, sizeof(int) * M*M, cudaMemcpyHostToDevice);
cudaMemcpy(devB, b, sizeof(int) * M*M, cudaMemcpyHostToDevice);
dim3 blocks(M / N, M / N);
dim3 threads(N, N);
composition << <blocks, threads >> > (devA, devB, devC);
cudaEvent_t syncEvent;
cudaEventCreate(&syncEvent);
cudaEventRecord(syncEvent, 0);
cudaEventSynchronize(syncEvent);
cudaMemcpy(c, devC, sizeof(int) * M*M, cudaMemcpyDeviceToHost);
for (int i = 0; i < M; i++)
{
for (int j = 0; j < M; j++) {
printf("%d ", c[i*M + j]);
}
printf("\n");
}
cudaEventDestroy(syncEvent);
cudaFree(devA);
cudaFree(devB);
cudaFree(devC);
std::system("pause");
return 0;
} |
ba9b48f1f76905b353aa8803fe9f92bef4d47a15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include <assert.h>
#include <hip/hip_fp16.h>
#include "common_cuda_helper.hpp"
#include "modulated_deform_conv_cuda_kernel.cuh"
#include "trt_cuda_helper.cuh"
#include "trt_plugin_helper.hpp"
template <typename T>
void trt_modulated_deformable_im2col(
const T* data_im_, const T* data_offset_, const T* data_mask_,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, T* data_col_,
hipStream_t stream) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
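  // Expands the deformable, mask-weighted input samples of every output location into
  // a column buffer so that the convolution itself can be finished as a plain GEMM in
  // the launcher below.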
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel<T>)
, dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, stream,
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im,
kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
dilation_w, channel_per_deformable_group, batch_size, channels,
deformable_group, height_col, width_col, data_col_);
cudaCheckError();
}
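// Adds the per-channel bias to the conv output, which is laid out as
// (batch, channel, height, width): step_channel = H*W and step_batch = C*H*W,
// so (index % step_batch) / step_channel recovers the channel of each element.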
template <typename scalar_t>
__global__ void output_add_bias_kernel(scalar_t* output, const scalar_t* bias,
size_t step_batch, size_t step_channel,
size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
output[index] += bias[(index % step_batch) / step_channel];
}
}
template <typename scalar_t>
static void output_add_bias(scalar_t* output, const scalar_t* bias,
size_t batch, size_t channel, size_t height,
size_t width, hipStream_t stream) {
size_t step_channel = height * width;
size_t step_batch = step_channel * channel;
size_t n = step_batch * batch;
hipLaunchKernelGGL(( output_add_bias_kernel), dim3(GET_BLOCKS(n)), dim3(THREADS_PER_BLOCK), 0, stream,
output, bias, step_batch, step_channel, n);
}
template <typename scalar_t>
void ModulatedDeformConvForwardCUDAKernelLauncher(
const scalar_t* input, const scalar_t* weight, const scalar_t* bias,
const scalar_t* offset, const scalar_t* mask, scalar_t* output,
void* workspace, int batch, int channels, int height, int width,
int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h,
int pad_w, int pad_h, int dilation_w, int dilation_h, int group,
int deformable_group, int im2col_step, hipblasHandle_t cublas_handle,
hipStream_t stream) {
size_t sizeof_dtype = sizeof(scalar_t);
bool with_bias = (bias != nullptr);
im2col_step = ::min(int(batch), im2col_step);
assert(batch % im2col_step == 0);
const int channels_kernel = channels / group;
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
scalar_t* columns = (scalar_t*)workspace;
const size_t input_step = channels * height * width;
const size_t offset_step =
deformable_group * kernel_h * kernel_w * 2 * height * width;
const size_t mask_step =
deformable_group * kernel_h * kernel_w * height * width;
const size_t out_step = channels_out * height_out * width_out;
const size_t out_group_step = out_step / group;
const size_t col_g_step =
channels * kernel_w * kernel_h / group * height_out * width_out;
const size_t weight_g_step =
channels_out / group * channels / group * kernel_h * kernel_w;
const int m = channels_out / group;
const int n = height_out * width_out;
const int k = channels / group * kernel_h * kernel_w;
scalar_t alpha = 1.;
scalar_t beta = 0.;
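  // One sample per iteration: build the modulated-deformable im2col buffer for sample b,
  // then run one GEMM per group, writing each group's result directly into the output.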
for (int b = 0; b < batch; b++) {
const scalar_t* input_start = input + b * input_step;
const scalar_t* offset_start = offset + b * offset_step;
const scalar_t* mask_start = mask + b * mask_step;
trt_modulated_deformable_im2col<scalar_t>(
input_start, offset_start, mask_start, 1, channels, height, width,
height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, deformable_group, columns, stream);
for (int g = 0; g < group; g++) {
const scalar_t* weight_start = weight + g * weight_g_step;
scalar_t* col_start = columns + g * col_g_step;
scalar_t* out_buffer_start = output + b * out_step + g * out_group_step;
// hipMemsetAsync(out_buffer_start, 0, 1, stream);
cublasGemmWrap<scalar_t>(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k,
&alpha, col_start, n, weight_start, k, &beta,
out_buffer_start, n);
cudaCheckError();
}
}
if (with_bias) {
output_add_bias<scalar_t>(output, bias, batch, channels_out, height_out,
width_out, stream);
}
}
void ModulatedDeformConvForwardCUDAKernelLauncher_float(
const float* input, const float* weight, const float* bias,
const float* offset, const float* mask, float* output, void* workspace,
int batch, int channels, int height, int width, int channels_out,
int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group,
int im2col_step, hipblasHandle_t cublas_handle, hipStream_t stream) {
ModulatedDeformConvForwardCUDAKernelLauncher<float>(
input, weight, bias, offset, mask, output, workspace, batch, channels,
height, width, channels_out, kernel_w, kernel_h, stride_w, stride_h,
pad_w, pad_h, dilation_w, dilation_h, group, deformable_group,
im2col_step, cublas_handle, stream);
}
| ba9b48f1f76905b353aa8803fe9f92bef4d47a15.cu | // Copyright (c) OpenMMLab. All rights reserved
#include <assert.h>
#include <cuda_fp16.h>
#include "common_cuda_helper.hpp"
#include "modulated_deform_conv_cuda_kernel.cuh"
#include "trt_cuda_helper.cuh"
#include "trt_plugin_helper.hpp"
template <typename T>
void trt_modulated_deformable_im2col(
const T* data_im_, const T* data_offset_, const T* data_mask_,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int deformable_group, T* data_col_,
cudaStream_t stream) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
modulated_deformable_im2col_gpu_kernel<T>
<<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im,
kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
dilation_w, channel_per_deformable_group, batch_size, channels,
deformable_group, height_col, width_col, data_col_);
cudaCheckError();
}
template <typename scalar_t>
__global__ void output_add_bias_kernel(scalar_t* output, const scalar_t* bias,
size_t step_batch, size_t step_channel,
size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
output[index] += bias[(index % step_batch) / step_channel];
}
}
template <typename scalar_t>
static void output_add_bias(scalar_t* output, const scalar_t* bias,
size_t batch, size_t channel, size_t height,
size_t width, cudaStream_t stream) {
size_t step_channel = height * width;
size_t step_batch = step_channel * channel;
size_t n = step_batch * batch;
output_add_bias_kernel<<<GET_BLOCKS(n), THREADS_PER_BLOCK, 0, stream>>>(
output, bias, step_batch, step_channel, n);
}
template <typename scalar_t>
void ModulatedDeformConvForwardCUDAKernelLauncher(
const scalar_t* input, const scalar_t* weight, const scalar_t* bias,
const scalar_t* offset, const scalar_t* mask, scalar_t* output,
void* workspace, int batch, int channels, int height, int width,
int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h,
int pad_w, int pad_h, int dilation_w, int dilation_h, int group,
int deformable_group, int im2col_step, cublasHandle_t cublas_handle,
cudaStream_t stream) {
size_t sizeof_dtype = sizeof(scalar_t);
bool with_bias = (bias != nullptr);
im2col_step = std::min(int(batch), im2col_step);
assert(batch % im2col_step == 0);
const int channels_kernel = channels / group;
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
scalar_t* columns = (scalar_t*)workspace;
const size_t input_step = channels * height * width;
const size_t offset_step =
deformable_group * kernel_h * kernel_w * 2 * height * width;
const size_t mask_step =
deformable_group * kernel_h * kernel_w * height * width;
const size_t out_step = channels_out * height_out * width_out;
const size_t out_group_step = out_step / group;
const size_t col_g_step =
channels * kernel_w * kernel_h / group * height_out * width_out;
const size_t weight_g_step =
channels_out / group * channels / group * kernel_h * kernel_w;
const int m = channels_out / group;
const int n = height_out * width_out;
const int k = channels / group * kernel_h * kernel_w;
scalar_t alpha = 1.;
scalar_t beta = 0.;
for (int b = 0; b < batch; b++) {
const scalar_t* input_start = input + b * input_step;
const scalar_t* offset_start = offset + b * offset_step;
const scalar_t* mask_start = mask + b * mask_step;
trt_modulated_deformable_im2col<scalar_t>(
input_start, offset_start, mask_start, 1, channels, height, width,
height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, dilation_h, dilation_w, deformable_group, columns, stream);
for (int g = 0; g < group; g++) {
const scalar_t* weight_start = weight + g * weight_g_step;
scalar_t* col_start = columns + g * col_g_step;
scalar_t* out_buffer_start = output + b * out_step + g * out_group_step;
// cudaMemsetAsync(out_buffer_start, 0, 1, stream);
cublasGemmWrap<scalar_t>(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
&alpha, col_start, n, weight_start, k, &beta,
out_buffer_start, n);
cudaCheckError();
}
}
if (with_bias) {
output_add_bias<scalar_t>(output, bias, batch, channels_out, height_out,
width_out, stream);
}
}
void ModulatedDeformConvForwardCUDAKernelLauncher_float(
const float* input, const float* weight, const float* bias,
const float* offset, const float* mask, float* output, void* workspace,
int batch, int channels, int height, int width, int channels_out,
int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group,
int im2col_step, cublasHandle_t cublas_handle, cudaStream_t stream) {
ModulatedDeformConvForwardCUDAKernelLauncher<float>(
input, weight, bias, offset, mask, output, workspace, batch, channels,
height, width, channels_out, kernel_w, kernel_h, stride_w, stride_h,
pad_w, pad_h, dilation_w, dilation_h, group, deformable_group,
im2col_step, cublas_handle, stream);
}
|
ce5406d5f04343880049e1c2f999e173c43e35fb.hip | // !!! This is a file automatically generated by hipify!!!
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.hip"
#include "filter.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
unsigned int total_segments = 0;
unsigned int process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
void* d_v = NULL;
void* s_v = NULL;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<string> col_aliases;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
struct is_match
{
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
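// float_type comparison functors: equality and ordering are evaluated within an
// EPSILON tolerance so that floating point rounding error (e.g. from converting the
// scaled-integer decimal representation) does not break comparisons.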
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
__host__ __device__
float_type operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct to_zero
{
__host__ __device__
bool operator()(const int_type x)
{
if(x == -1)
return 0;
else
return 1;
}
};
struct div_long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x, const float_type y)
{
return (float_type)x/y;
}
};
// trim from start
static inline std::string &ltrim(std::string &s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
return s;
}
// trim from end
static inline std::string &rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
return s;
}
// trim from both ends
static inline std::string &trim(std::string &s) {
return ltrim(rtrim(s));
}
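// Minimal strtok replacement: splits s on the single delimiter c, remembering the
// current position in *m between calls; returns NULL once the remaining input is empty.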
char *mystrtok(char **m,char *s,const char c)
{
char *p=s?s:*m;
if( !*p )
return 0;
*m=strchr(p,c);
if( *m )
*(*m)++=0;
else
*m=p+strlen(p);
return p;
}
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t max_tmp(CudaSet* a);
size_t getFreeMem();
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, unsigned int segment);
void filter_op(char *s, char *f, unsigned int segment);
float total_time1 = 0;
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, queue<string> &references, queue<int> &references_nums)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, references, references_nums);
keep = false;
source = 1;
text_source = 1;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
keep = false;
source = 1;
text_source = 0;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::CudaSet(size_t RecordCount, unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::CudaSet(queue<string> op_sel, queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(unsigned int colIndex, size_t RecordCount)
{
if (type[colIndex] == 0) {
d_columns_int[type_index[colIndex]].resize(RecordCount);
}
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(RecordCount);
else {
void* d;
size_t sz = RecordCount*char_size[type_index[colIndex]];
hipError_t cudaStatus = hipMalloc(&d, sz);
if(cudaStatus != hipSuccess) {
cout << "Could not allocate " << sz << " bytes of GPU memory for " << RecordCount << " records " << endl;
exit(0);
};
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
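// Reads one compressed char-column segment from disk, hashes every dictionary entry
// with MurmurHash64A, decodes the bit-packed dictionary indices on the GPU, and gathers
// the resulting 64-bit hashes into d_columns_int (applying the filter permutation prm_d
// when the set is filtered).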
void CudaSet::decompress_char_hash(unsigned int colIndex, unsigned int segment, size_t i_cnt)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
size_t old_count;
const unsigned int len = char_size[type_index[colIndex]];
string f1 = load_file_name + "." + int_to_string(cols[colIndex]) + "." + int_to_string(segment);
FILE* f;
f = fopen (f1.c_str() , "rb" );
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
unsigned long long int* hashes = new unsigned long long int[sz];
for(unsigned int i = 0; i < sz ; i++) {
hashes[i] = MurmurHash64A(&d_array[i*len], len, hash_seed); // hash each dictionary entry
};
void* d;
hipMalloc((void **) &d, sz*int_size);
hipMemcpy( d, (void *) hashes, sz*8, hipMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> dd_int((unsigned long long int*)d);
delete[] d_array;
delete[] hashes;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
hipMalloc((void **) &d_val, vals_count*8);
hipMemcpy(d_val, (void *) int_array, vals_count*8, hipMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> mval((unsigned long long int*)d_val);
delete[] int_array;
void* d_int;
hipMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v1;
hipMalloc((void **) &d_v1, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v1);
thrust::for_each(begin, begin + real_count, ff);
thrust::device_ptr<unsigned int> dd_val((unsigned int*)d_int);
if(filtered) {
if(prm_index == 'R') {
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_tmp);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::gather(prm_d.begin(), prm_d.begin() + mRecCount, d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
};
hipFree(d);
hipFree(d_val);
hipFree(d_v1);
hipFree(d_int);
};
// takes a char column , hashes strings, copies them to a gpu
void CudaSet::add_hashed_strings(string field, unsigned int segment, size_t i_cnt)
{
unsigned int colInd2 = columnNames.find(field)->second;
CudaSet *t = varNames[setMap[field]];
if(not_compressed) { // decompressed strings on a host
size_t old_count;
unsigned long long int* hashes = new unsigned long long int[t->mRecCount];
for(unsigned int i = 0; i < t->mRecCount ; i++) {
hashes[i] = MurmurHash64A(t->h_columns_char[t->type_index[colInd2]] + i*t->char_size[t->type_index[colInd2]] + segment*t->maxRecs*t->char_size[t->type_index[colInd2]], t->char_size[t->type_index[colInd2]], hash_seed);
};
if(filtered) {
if(prm_index == 'R') {
thrust::device_ptr<unsigned long long int> d_tmp = thrust::device_malloc<unsigned long long int>(t->mRecCount);
thrust::copy(hashes, hashes+mRecCount, d_tmp);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::gather(prm_d.begin(), prm_d.begin() + mRecCount, d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
delete [] hashes;
}
else { // hash the dictionary
decompress_char_hash(colInd2, segment, i_cnt);
};
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
bool prealloc = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
if (mRecCount > prealloc_char_size) {
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], mRecCount*char_size[type_index[i]]);
prealloc = 1;
};
}
else {
h_columns_char[type_index[i]] = new char[mRecCount*char_size[type_index[i]]];
};
};
};
if(prealloc)
prealloc_char_size = mRecCount;
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], mRecCount*char_size[type_index[i]]);
}
else {
h_columns_char[type_index[i]] = new char[mRecCount*char_size[type_index[i]]];
};
};
};
};
void CudaSet::reserve(size_t Recs)
{
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0)
h_columns_int[type_index[i]].reserve(Recs);
else if(type[i] == 1)
h_columns_float[type_index[i]].reserve(Recs);
else {
h_columns_char[type_index[i]] = new char[Recs*char_size[type_index[i]]];
if(h_columns_char[type_index[i]] == NULL) {
cout << "Could not allocate on a host " << Recs << " records of size " << char_size[type_index[i]] << endl;
exit(0);
};
prealloc_char_size = Recs;
};
};
};
void CudaSet::deAllocColumnOnDevice(unsigned int colIndex)
{
if (type[colIndex] == 0 && !d_columns_int.empty()) {
if(d_columns_int[type_index[colIndex]].size() > 0) {
d_columns_int[type_index[colIndex]].resize(0);
d_columns_int[type_index[colIndex]].shrink_to_fit();
};
}
else if (type[colIndex] == 1 && !d_columns_float.empty()) {
if (d_columns_float[type_index[colIndex]].size() > 0) {
d_columns_float[type_index[colIndex]].resize(0);
d_columns_float[type_index[colIndex]].shrink_to_fit();
};
}
else if (type[colIndex] == 2 && d_columns_char[type_index[colIndex]] != NULL) {
hipFree(d_columns_char[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = NULL;
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < mColumnCount; i++)
allocColumnOnDevice(i, RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < mColumnCount; i++)
deAllocColumnOnDevice(i);
for(unsigned int i=0; i < d_columns_int.size(); i++) {
if(d_columns_int[i].size() > 0) {
d_columns_int[i].resize(0);
d_columns_int[i].shrink_to_fit();
};
};
for(unsigned int i=0; i < d_columns_float.size(); i++) {
if(d_columns_float[i].size() > 0) {
d_columns_float[i].resize(0);
d_columns_float[i].shrink_to_fit();
};
};
if(grp) {
hipFree(grp);
grp = NULL;
};
if(filtered) { // free the sources
string some_field;
map<string,unsigned int>::iterator it=columnNames.begin();
some_field = (*it).first;
if(setMap[some_field].compare(name)) {
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, unsigned int colIndex)
{
// if (RecCount) {
if (type[colIndex] == 0) {
d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
}
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
else {
if (d_columns_char[type_index[colIndex]] != NULL)
hipFree(d_columns_char[type_index[colIndex]]);
void *d;
hipMalloc((void **) &d, (mRecCount+RecCount)*char_size[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = (char*)d;
};
// };
};
void CudaSet::resizeDevice(size_t RecCount)
{
// if (RecCount)
for(unsigned int i=0; i < mColumnCount; i++)
resizeDeviceColumn(RecCount, i);
};
bool CudaSet::onDevice(unsigned int i)
{
size_t j = type_index[i];
if (type[i] == 0) {
if (d_columns_int.empty())
return 0;
if (d_columns_int[j].size() == 0)
return 0;
}
else if (type[i] == 1) {
if (d_columns_float.empty())
return 0;
if(d_columns_float[j].size() == 0)
return 0;
}
else if (type[i] == 2) {
if(d_columns_char.empty())
return 0;
if(d_columns_char[j] == NULL)
return 0;
};
return 1;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->ref_joins = ref_joins;
a->ref_sets = ref_sets;
a->ref_cols = ref_cols;
for ( map<string,unsigned int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if(a->type[i] == 0) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
a->type_index[i] = a->d_columns_int.size()-1;
}
else if(a->type[i] == 1) {
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
a->type_index[i] = a->d_columns_float.size()-1;
a->decimal[i] = decimal[i];
}
else {
a->h_columns_char.push_back(NULL);
a->d_columns_char.push_back(NULL);
a->type_index[i] = a->d_columns_char.size()-1;
a->char_size.push_back(char_size[type_index[i]]);
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
void CudaSet::readSegmentsFromFile(unsigned int segNum, unsigned int colIndex, size_t offset)
{
std::clock_t start1 = std::clock();
string f1(load_file_name);
f1 += "." + int_to_string(cols[colIndex]) + "." + int_to_string(segNum);
unsigned int cnt;
FILE* f;
f = fopen(f1.c_str(), "rb" );
if(f == NULL) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
size_t rr;
if(type[colIndex] == 0) {
if(1 > h_columns_int[type_index[colIndex]].size())
h_columns_int[type_index[colIndex]].resize(1);
fread(h_columns_int[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[0];
if(cnt > h_columns_int[type_index[colIndex]].size()/8 + 10)
h_columns_int[type_index[colIndex]].resize(cnt/8 + 10);
rr = fread((unsigned int*)(h_columns_int[type_index[colIndex]].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
cout << "Couldn't read " << cnt+52 << " bytes from " << f1 << " ,read only " << rr << endl;
exit(0);
};
}
else if(type[colIndex] == 1) {
if(1 > h_columns_float[type_index[colIndex]].size())
h_columns_float[type_index[colIndex]].resize(1);
fread(h_columns_float[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_float[type_index[colIndex]].data()))[0];
if(cnt > h_columns_float[type_index[colIndex]].size()/8 + 10)
h_columns_float[type_index[colIndex]].resize(cnt/8 + 10);
rr = fread((unsigned int*)(h_columns_float[type_index[colIndex]].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
cout << "Couldn't read " << cnt+52 << " bytes from " << f1 << endl;
exit(0);
};
}
else {
decompress_char(f, colIndex, segNum, offset);
};
fclose(f);
//if(verbose)
// std::cout<< "read from file time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
};
void CudaSet::decompress_char(FILE* f, unsigned int colIndex, unsigned int segNum, size_t offset)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
const unsigned int len = char_size[type_index[colIndex]];
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
void* d;
hipMalloc((void **) &d, sz*len);
hipMemcpy( d, (void *) d_array, sz*len, hipMemcpyHostToDevice);
delete[] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
thrust::device_ptr<unsigned int> param = thrust::device_malloc<unsigned int>(2);
param[1] = fit_count;
param[0] = bits_encoded;
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
//fclose(f);
void* d_val;
hipMalloc((void **) &d_val, vals_count*8);
hipMemcpy(d_val, (void *) int_array, vals_count*8, hipMemcpyHostToDevice);
delete[] int_array;
void* d_int;
hipMalloc((void **) &d_int, real_count*4);
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)thrust::raw_pointer_cast(param));
thrust::for_each(begin, begin + real_count, ff);
if(!alloced_switch)
str_gather(d_int, real_count, d, d_columns_char[type_index[colIndex]] + offset*len, len);
else
str_gather(d_int, real_count, d, alloced_tmp, len);
mRecCount = real_count;
hipFree(d);
hipFree(d_val);
thrust::device_free(param);
hipFree(d_int);
}
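// Copies one segment of a column to the GPU (or to alloced_tmp when alloced_switch is set).
// Uncompressed columns are copied straight from the host vectors; compressed ones are read
// from the segment file and pfor-decompressed, with decimal floats converted back from
// their scaled integer form.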
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
switch(type[colIndex]) {
case 0 :
if(!alloced_switch)
thrust::copy(h_columns_int[type_index[colIndex]].begin() + maxRecs*segment, h_columns_int[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_columns_int[type_index[colIndex]].begin() + offset);
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[type_index[colIndex]].begin() + maxRecs*segment, h_columns_int[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_col);
};
break;
case 1 :
if(!alloced_switch) {
thrust::copy(h_columns_float[type_index[colIndex]].begin() + maxRecs*segment, h_columns_float[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_columns_float[type_index[colIndex]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[type_index[colIndex]].begin() + maxRecs*segment, h_columns_float[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_col);
};
break;
default :
if(!alloced_switch) {
hipMemcpy(d_columns_char[type_index[colIndex]] + char_size[type_index[colIndex]]*offset, h_columns_char[type_index[colIndex]] + maxRecs*segment*char_size[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
}
else
hipMemcpy(alloced_tmp , h_columns_char[type_index[colIndex]] + maxRecs*segment*char_size[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
};
}
else {
readSegmentsFromFile(segment,colIndex, offset);
if(type[colIndex] != 2) {
if(d_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
};
if(type[colIndex] == 0) {
if(!alloced_switch) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + offset), h_columns_int[type_index[colIndex]].data(), d_v, s_v);
}
else {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[type_index[colIndex]].data(), d_v, s_v);
};
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
if(!alloced_switch) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + offset) , h_columns_float[type_index[colIndex]].data(), d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
}
else {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[type_index[colIndex]].data(), d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
}
//else // uncompressed float
//hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
hipMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, hipMemcpyHostToDevice);
};
}
else {
size_t totals = 0;
if(d_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
size_t cnt = 0;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colIndex, cnt);
if(type[colIndex] == 0) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totals), h_columns_int[type_index[colIndex]].data(), d_v, s_v);
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totals) , h_columns_float[type_index[colIndex]].data(), d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totals));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totals, long_to_float());
}
// else uncompressed float
//hipMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, hipMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
totals = totals + mRecCount; // accumulate so the decompress offset and final mRecCount cover all segments
};
mRecCount = totals;
};
}
void CudaSet::CopyColumnToHost(int colIndex, size_t offset, size_t RecCount)
{
switch(type[colIndex]) {
case 0 :
thrust::copy(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + RecCount, h_columns_int[type_index[colIndex]].begin() + offset);
break;
case 1 :
thrust::copy(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + RecCount, h_columns_float[type_index[colIndex]].begin() + offset);
break;
default :
hipMemcpy(h_columns_char[type_index[colIndex]] + offset*char_size[type_index[colIndex]], d_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*RecCount, hipMemcpyDeviceToHost);
}
}
void CudaSet::CopyColumnToHost(int colIndex)
{
CopyColumnToHost(colIndex, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < mColumnCount; i++) {
CopyColumnToHost(i, offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_int[type_index[colIndex]].data());
}
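// Builds the group bitmap d_grp: every grouping column compares each row with its
// successor and the per-column inequality is OR-ed into d_grp; char columns are compared
// through their hashed int_type copies (starting at index int_col_count). grp_count ends
// up holding the number of distinct groups.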
void CudaSet::GroupBy(stack<string> columnRef, unsigned int int_col_count)
{
unsigned int colIndex;
if(grp)
hipFree(grp);
CUDA_SAFE_CALL(hipMalloc((void **) &grp, mRecCount * sizeof(bool)));
thrust::device_ptr<bool> d_grp(grp);
thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);
thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
d_group[mRecCount-1] = 1;
unsigned int i_count = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
columnGroups.push(columnRef.top()); // save for future references
colIndex = columnNames[columnRef.top()];
if (type[colIndex] == 0) { // int_type
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
}
else if (type[colIndex] == 1) { // float_type
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
}
else { // Char
//str_grp(d_columns_char[type_index[colIndex]], mRecCount, d_group, char_size[type_index[colIndex]]);
//use int_type
thrust::transform(d_columns_int[int_col_count+i_count].begin(), d_columns_int[int_col_count+i_count].begin() + mRecCount - 1,
d_columns_int[int_col_count+i_count].begin()+1, d_group, thrust::not_equal_to<int_type>());
i_count++;
};
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
};
thrust::device_free(d_group);
grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
};
void CudaSet::addDeviceColumn(int_type* col, int colIndex, string colName, size_t recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
type_index[colIndex] = d_columns_int.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_int[type_index[colIndex]].size() < recCount) {
d_columns_int[type_index[colIndex]].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[type_index[colIndex]].begin());
};
void CudaSet::addDeviceColumn(float_type* col, int colIndex, string colName, size_t recCount, bool is_decimal)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
type_index[colIndex] = d_columns_float.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_float[type_index[colIndex]].size() < recCount)
d_columns_float[type_index[colIndex]].resize(recCount);
};
decimal[colIndex] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[type_index[colIndex]].begin());
};
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!op_sort.empty()) { //sort the segment
//copy the key columns to device
queue<string> sf(op_sort);
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount*max_char(this, sf)));
string sort_type = "ASC";
while(!sf.empty()) {
int colInd = columnNames[sf.front()];
allocColumnOnDevice(colInd, maxRecs);
CopyColumnToGpu(colInd);
if (type[colInd] == 0)
update_permutation(d_columns_int[type_index[colInd]], raw_ptr, mRecCount, sort_type, (int_type*)temp);
else if (type[colInd] == 1)
update_permutation(d_columns_float[type_index[colInd]], raw_ptr, mRecCount, sort_type, (float_type*)temp);
else {
update_permutation_char(d_columns_char[type_index[colInd]], raw_ptr, mRecCount, sort_type, (char*)temp, char_size[type_index[colInd]]);
};
deAllocColumnOnDevice(colInd);
sf.pop();
};
hipFree(temp);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
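// Each sorted segment is split into partition_count partitions of roughly partition_recs rows,
// and every partition is written out as its own segment file. The last partition absorbs the
// remainder: e.g. mCount = 10 with partition_count = 3 gives partition_recs = 3, so the
// partitions hold 3, 3 and 4 rows.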
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
unsigned int old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i< mColumnCount; i++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
new_offset = 0;
if(!op_sort.empty()) {
allocColumnOnDevice(i, maxRecs);
CopyColumnToGpu(i);
};
if(type[i] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[type_index[i]].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[type_index[i]], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[type_index[i]], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0);
};
}
else if(type[i] == 1) {
if(decimal[i]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[type_index[i]].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[type_index[i]], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[type_index[i]], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1);
};
}
else { // do not compress -- float
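// Uncompressed float segment layout on disk: a 4-byte row count, then count*float_size bytes
// of raw values, then a 4-byte compression-type marker (3 = not compressed).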
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[type_index[i]].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[type_index[i]].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
unsigned int mCount32 = (unsigned int)mCount; // the record-count field is 4 bytes
binary_file.write((char *)&mCount32, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[type_index[i]]*mRecCount];
apply_permutation_char_host(h_columns_char[type_index[i]], h_permutation, mRecCount, t, char_size[type_index[i]]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[type_index[i]]*mRecCount, h_columns_char[type_index[i]]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, i, partition_recs, new_offset);
else
compress_char(str, i, mCount - partition_recs*p, new_offset);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else
compress_char(str, i, mCount, offset);
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, cols[i], total_segments-1);
else {
writeHeader(file_name, cols[i], total_segments);
};
};
total_segments = old_segments;
};
hipFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
void CudaSet::writeHeader(string file_name, unsigned int col, unsigned int tot_segs) {
string str = file_name + "." + int_to_string(col);
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, unsigned int col, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + int_to_string(col);
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
unsigned int maxRecs32 = (unsigned int)maxRecs1; // the max-recs field is 4 bytes
binary_file.write((char *)&maxRecs32, 4);
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
idx = cols[columnNames[os.front()]];
if(verbose)
cout << "sorted on " << idx << endl;
binary_file.write((char *)&idx, 4);
os.pop();
};
binary_file.close();
}
else if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = cols[columnNames[os.front()]];
binary_file.write((char *)&idx, 4);
os.pop();
};
binary_file.close();
};
}
using namespace mgpu;
void CudaSet::Store(string file_name, char* sep, unsigned int limit, bool binary, bool term)
{
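// Store() has three paths: binary == 0 writes a delimited text file (to file_name or stdout);
// text_source writes a compressed binary table from text-loaded data, including reference
// checks; the final branch re-compresses data that originated from an existing binary table.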
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int i = 0; i< mColumnCount; i++) {
writeHeader(file_name, cols[i], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
//cout << "mCount " << mCount << " " << mRecCount << endl;
if(binary == 0) {
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (file_pr == NULL) {
cout << "Could not open file " << file_name << endl;
return; // writing to a NULL FILE* below would crash
};
}
else
file_pr = stdout;
char buffer [33];
string ss;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
for (map<string,unsigned int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
}
else if(text_source) { //writing a binary file using a text file as a source
// time to perform join checks on REFERENCES dataset segments
for(unsigned int i = 0; i< mColumnCount; i++) {
if(ref_sets.find(i) != ref_sets.end()) {
string f1 = file_name + "." + int_to_string(i) + ".refs";
fstream f_file;
if(total_segments == 0) {
f_file.open(f1.c_str(), ios::out|ios::trunc|ios::binary);
unsigned int len = ref_sets[i].size();
f_file.write((char *)&len, 4);
f_file.write(ref_sets[i].c_str(), len);
f_file.write((char *)&ref_cols[i], 4);
}
else {
f_file.open(f1.c_str(), ios::out|ios::app|ios::binary);
};
f1 = ref_sets[i] + "." + int_to_string(ref_cols[i]) + ".header";
FILE* ff = fopen(f1.c_str(), "rb");
if(ff == NULL) {
cout << "Couldn't open file " << f1 << endl;
exit(0);
};
unsigned int ref_segCount, ref_maxRecs;
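// The .header file begins with an 8-byte total record count, then a 4-byte segment count and a
// 4-byte max-segment size (see writeHeader). The first two 4-byte reads below just skip the
// 8-byte count; the third read picks up the segment count and the fourth the max size.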
fread((char *)&ref_segCount, 4, 1, ff);
fread((char *)&ref_segCount, 4, 1, ff);
fread((char *)&ref_segCount, 4, 1, ff);
fread((char *)&ref_maxRecs, 4, 1, ff);
fclose(ff);
//cout << "CALC " << i << " " << ref_sets[i] << " " << ref_cols[i] << " " << ref_segCount << endl;
CudaSet* a = new CudaSet(maxRecs, 1);
a->h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
a->d_columns_int.push_back(thrust::device_vector<int_type>(ref_maxRecs));
a->type[0] = 0;
a->type_index[0] = 0;
a->not_compressed = 0;
a->load_file_name = ref_sets[i];
a->cols[0] = 1;
MGPU_MEM(int) aIndicesDevice, bIndicesDevice;
size_t res_count;
if(!onDevice(i)) {
allocColumnOnDevice(i, maxRecs);
};
CopyColumnToGpu(i);
thrust::sort(d_columns_int[type_index[i]].begin(), d_columns_int[type_index[i]].begin() + mRecCount);
f_file.write((char *)&total_segments, 4);
f_file.write((char *)&ref_segCount, 4);
for(unsigned int z = 0; z < ref_segCount; z++) {
a->CopyColumnToGpu(0, z, 0);
thrust::sort(a->d_columns_int[0].begin(), a->d_columns_int[0].begin() + a->mRecCount);
// check if there is a join result
//cout << "join " << mRecCount << " " << a->mRecCount << endl;
res_count = RelationalJoin<MgpuJoinKindInner>(thrust::raw_pointer_cast(d_columns_int[type_index[i]].data()), mRecCount,
thrust::raw_pointer_cast(a->d_columns_int[0].data()), a->mRecCount,
&aIndicesDevice, &bIndicesDevice,
mgpu::less<int_type>(), *context);
//cout << "RES " << i << " " << total_segments << ":" << z << " " << res_count << endl;
f_file.write((char *)&z, 4);
f_file.write((char *)&res_count, 8);
};
f_file.close();
a->deAllocColumnOnDevice(0);
a->free();
};
};
compress(file_name, 0, 1, 0, mCount);
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2)
deAllocColumnOnDevice(i);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for (map<string,unsigned int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it ) {
op_vx.push((*it).first);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount);
offset = offset + mCount;
};
};
};
}
void CudaSet::compress_char(string file_name, unsigned int index, size_t mCount, size_t offset)
{
std::map<string,unsigned int> dict;
std::vector<string> dict_ordered;
std::vector<unsigned int> dict_val;
map<string,unsigned int>::iterator iter;
unsigned int bits_encoded, ss;
unsigned int len = char_size[type_index[index]];
for (unsigned int i = 0 ; i < mCount; i++) {
string f(h_columns_char[type_index[index]] + (i+offset)*len, len);
if((iter = dict.find(f)) != dict.end()) {
dict_val.push_back(iter->second);
}
else {
ss = (unsigned int)dict.size();
dict[f] = ss;
dict_val.push_back(ss);
dict_ordered.push_back(f);
};
};
bits_encoded = (unsigned int)ceil(log2(double(dict.size()+1)));
char *cc = new char[len+1];
unsigned int sz = (unsigned int)dict_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary);
binary_file.write((char *)&sz, 4);
for(unsigned int i = 0; i < dict_ordered.size(); i++) {
memset(&cc[0], 0, len);
strcpy(cc,dict_ordered[i].c_str());
binary_file.write(cc, len);
};
delete [] cc;
unsigned int fit_count = 64/bits_encoded;
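// Dictionary codes are bit-packed, fit_count codes per 64-bit word. For example, a dictionary
// of 5 strings needs bits_encoded = ceil(log2(6)) = 3 bits per code, so fit_count = 64/3 = 21
// codes fit into each word written below.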
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, 8);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
bool CudaSet::LoadBigFile(FILE* file_p)
{
char line[1000];
unsigned int current_column, count = 0, index;
char *p,*t;
const char* sep = separator.c_str();
map<unsigned int,unsigned int> col_map;
for(unsigned int i = 0; i < mColumnCount; i++) {
col_map[cols[i]] = i;
};
while (count < process_count && fgets(line, 1000, file_p) != NULL) {
strtok(line, "\n");
current_column = 0;
for(t=mystrtok(&p,line,*sep); t; t=mystrtok(&p,0,*sep)) {
current_column++;
if(col_map.find(current_column) == col_map.end()) {
continue;
};
index = col_map[current_column];
if (type[index] == 0) {
if (strchr(t,'-') == NULL) {
(h_columns_int[type_index[index]])[count] = atoll(t);
}
else { // handling possible dates
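// A value such as "2013-07-15" is compacted in place to "20130715" by shifting the month and
// day over the dashes, then parsed as a plain integer.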
strncpy(t+4,t+5,2);
strncpy(t+6,t+8,2);
t[8] = '\0';
(h_columns_int[type_index[index]])[count] = atoll(t);
};
}
else if (type[index] == 1) {
(h_columns_float[type_index[index]])[count] = atoff(t);
}
else {//char
strcpy(h_columns_char[type_index[index]] + count*char_size[type_index[index]], t);
}
};
count++;
};
mRecCount = count;
if(count < process_count) {
fclose(file_p);
return 1;
}
else
return 0;
};
void CudaSet::free() {
for(unsigned int i = 0; i < mColumnCount; i++ ) {
if(type[i] == 2 && h_columns_char[type_index[i]]) {
delete [] h_columns_char[type_index[i]];
h_columns_char[type_index[i]] = NULL;
}
else {
if(type[i] == 0 ) {
h_columns_int[type_index[i]].resize(0);
h_columns_int[type_index[i]].shrink_to_fit();
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(0);
h_columns_float[type_index[i]].shrink_to_fit();
};
}
};
prm_d.resize(0);
prm_d.shrink_to_fit();
deAllocOnDevice();
//cout << "dealloced " << name << " " << getFreeMem() << endl;
delete [] type; // allocated with new[] in initialize()
delete [] decimal;
if(grp_type)
delete [] grp_type;
delete [] cols;
if(fil_s)
delete fil_s;
if(fil_f)
delete fil_f;
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s) res = 1;
else res = 0;
else if (op_type == 1) // <
if(d<s) res = 1;
else res = 0;
else if (op_type == 6) // >=
if(d>=s) res = 1;
else res = 0;
else if (op_type == 5) // <=
if(d<=s) res = 1;
else res = 0;
else if (op_type == 4)// =
if(d==s) res = 1;
else res = 0;
else // !=
if(d!=s) res = 1;
else res = 0;
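// Both operands are scalars, so the result is a constant column: fill mRecCount booleans with res.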
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p + mRecCount, res);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
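// Float comparisons use an EPSILON tolerance: two values are treated as equal when
// |d - s| < EPSILON, and strict inequalities require a difference larger than EPSILON.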
if (op_type == 2) // >
if ((d-s) > EPSILON) res = 1;
else res = 0;
else if (op_type == 1) // <
if ((s-d) > EPSILON) res = 1;
else res = 0;
else if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON)) res = 1;
else res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p + mRecCount, res);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the int column to float into temp
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
}
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
};
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
FILE* f;
string f1;
unsigned int cnt, bytes;
prealloc_char_size = 0;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f != NULL) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
sorted_fields.push(idx);
if(verbose)
cout << "segment sorted on " << idx << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f != NULL) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
presorted_fields.push(idx);
if(verbose)
cout << "presorted on " << idx << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
f1 = file_name + "." + int_to_string(colsRef.front()) + ".0";
f = fopen (f1.c_str() , "rb" );
fread((char *)&bytes, 4, 1, f);
fclose(f);
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + int_to_string(colsRef.front()) + ".0";
f = fopen (f1.c_str() , "rb" );
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
//check the references
f1 = file_name + "." + int_to_string(colsRef.front()-1) + ".refs";
f = fopen (f1.c_str() , "rb" );
if(f != NULL) {
unsigned int len;
fread(&len, 4, 1, f);
char* array = new char[len];
fread((void*)array, len, 1, f);
ref_sets[i].assign(array, len); // the stored name has no terminating zero
delete [] array;
unsigned int z, segs, seg_num, curr_seg;
size_t res_count;
fread((void*)&z, 4, 1, f);
ref_cols[i] = z;
unsigned int bytes_read = fread((void*)&curr_seg, 4, 1, f);
while(bytes_read == 1) {
fread((void*)&segs, 4, 1, f); //ref seg count
//cout << "for " << i << " read " << array << " and " << z << " " << segs << endl;
for(unsigned int j = 0; j < segs; j++) {
fread((void*)&seg_num, 4, 1, f);
fread((void*)&res_count, 8, 1, f);
//cout << "curr_seg " << curr_seg << " " << seg_num << " " << res_count << endl;
if(res_count)
ref_joins[i][curr_seg].insert(seg_num);
else
ref_joins[i][curr_seg].insert(std::numeric_limits<unsigned int>::max());
};
bytes_read = fread((void*)&curr_seg, 4, 1, f);
};
fclose(f);
};
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
//h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >(bytes/8 + 10));
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
//cout << "creating " << name << " " << nameRef.front() << " " << bytes/8 + 10 << endl;
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
//h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >(bytes/8 + 10));
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type >());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
//h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >(bytes/8 + 10));
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, queue<string> &references, queue<int> &references_nums)
{
mColumnCount = (unsigned int)nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
prealloc_char_size = 0;
tmp_table = 0;
filtered = 0;
mRecCount = Recs;
hostRecCount = Recs;
segCount = 1;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
if(!references.front().empty()) {
ref_sets[i] = references.front();
ref_cols[i] = references_nums.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
references.pop();
references_nums.pop();
};
};
void CudaSet::initialize(size_t RecordCount, unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
prealloc_char_size = 0;
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
filtered = 0;
for(unsigned int i =0; i < mColumnCount; i++) {
cols[i] = i;
};
};
void CudaSet::initialize(queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
prealloc_char_size = 0;
unsigned int index;
unsigned int i = 0;
while(!op_sel.empty()) {
if(!setMap.count(op_sel.front())) {
cout << "coudn't find column " << op_sel.front() << endl;
exit(0);
};
CudaSet* a = varNames[setMap[op_sel.front()]];
if(i == 0)
maxRecs = a->maxRecs;
index = a->columnNames[op_sel.front()];
cols[i] = i;
decimal[i] = a->decimal[index];
columnNames[op_sel.front()] = i;
if (a->type[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(a->char_size[a->type_index[index]]);
};
i++;
op_sel.pop();
};
}
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if(a->columnNames.find(q_cnt.front()) != a->columnNames.end() || b->columnNames.find(q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
maxRecs = b->maxRecs;
map<string,unsigned int>::iterator it;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
prealloc_char_size = 0;
unsigned int index;
i = 0;
while(!op_sel.empty() && (columnNames.find(op_sel.front()) == columnNames.end())) {
if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
index = it->second;
cols[i] = i;
decimal[i] = a->decimal[index];
columnNames[op_sel.front()] = i;
if (a->type[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(a->char_size[a->type_index[index]]);
};
i++;
}
else if((it = b->columnNames.find(op_sel.front())) != b->columnNames.end()) {
index = it->second;
columnNames[op_sel.front()] = i;
cols[i] = i;
decimal[i] = b->decimal[index];
if ((b->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((b->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(b->char_size[b->type_index[index]]);
};
i++;
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 5;
else if (op_type == 1) // <
return 6;
else if (op_type == 6) // >=
return 1;
else if (op_type == 5) // <=
return 2;
else return op_type;
}
size_t getFreeMem()
{
size_t available, total;
hipMemGetInfo(&available, &total);
return available;
} ;
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
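// For a filtered set, a single scratch buffer (alloced_tmp) is kept instead of per-column
// device vectors; it is sized to hold maxRecs elements of the widest column type of the source
// table and reused for every staged copy/gather in copyColumns and mygather.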
size_t max_sz = max_tmp(a) ;
CudaSet* t = varNames[setMap[fields.front()]];
if(max_sz*t->maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, max_sz*t->maxRecs);
alloced_sz = max_sz*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
unsigned int idx = a->columnNames[fields.front()];
bool onDevice = 0;
if(a->type[idx] == 0) {
if(a->d_columns_int[a->type_index[idx]].size() > 0) {
onDevice = 1;
}
}
else if(a->type[idx] == 1) {
if(a->d_columns_float[a->type_index[idx]].size() > 0) {
onDevice = 1;
};
}
else {
if((a->d_columns_char[a->type_index[idx]]) != NULL) {
onDevice = 1;
};
};
if (!onDevice) {
a->allocColumnOnDevice(idx, a->maxRecs);
}
}
fields.pop();
};
};
}
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
if(!a->onDevice(idx)) {
a->allocColumnOnDevice(idx, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(tindex, idx, a, t, count, a->mRecCount);
}
else {
mycopy(tindex, idx, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
size_t getSegmentRecCount(CudaSet* a, unsigned int segment) {
if (segment == a->segCount-1) {
return a->hostRecCount - a->maxRecs*segment;
}
else
return a->maxRecs;
}
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz) {
a->resizeDevice(count);
a->devRecCount = count+a->mRecCount;
};
};
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[setMap[fields.front()]];
alloced_switch = 1;
t->CopyColumnToGpu(t->columnNames[fields.front()], segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
a->orig_segs[t->load_file_name].insert(segment);
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(a->columnNames[fields.front()], segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
}
void setPrm(CudaSet* a, CudaSet* b, char val, unsigned int segment) {
b->prm_index = val;
if (val == 'A') {
b->mRecCount = getSegmentRecCount(a,segment);
}
else if (val == 'N') {
b->mRecCount = 0;
}
}
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
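// When alloced_switch is set, the source column was staged in the shared scratch buffer
// (alloced_tmp) rather than in t's own device vectors, so the gather reads from that buffer.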
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
(void*)t->d_columns_char[t->type_index[tindex]], (void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (unsigned int)a->char_size[a->type_index[idx]] );
}
else {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
alloced_tmp, (void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (unsigned int)a->char_size[a->type_index[idx]] );
};
}
};
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].begin() + g_size,
a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].begin() + g_size,
a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
hipMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (void**)t->d_columns_char[t->type_index[tindex]],
g_size*t->char_size[t->type_index[tindex]], hipMemcpyDeviceToDevice);
}
else {
hipMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), alloced_tmp,
g_size*t->char_size[t->type_index[tindex]], hipMemcpyDeviceToDevice);
};
};
};
size_t load_queue(queue<string> c1, CudaSet* right, bool str_join, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() || str_join) {
cc.push(c1.front());
};
};
c1.pop();
};
if(!str_join && right->columnNames.find(f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
rcount = right->maxRecs;
}
else
rcount = right->mRecCount;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
};
ct.pop();
};
size_t cnt_r = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
//cout << "RIGHT SEG " << i << " " << cnt_r << " " << right->d_columns_int[1][0] << "-" << right->d_columns_int[1][cnt_r-1] << endl;
};
right->mRecCount = cnt_r;
return cnt_r;
}
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->char_size.size(); i++)
if (a->char_size[i] > max_char1)
max_char1 = a->char_size[i];
return max_char1;
};
size_t max_char(CudaSet* a, set<string> field_names)
{
size_t max_char1 = 8, i;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
i = a->columnNames[*it];
if (a->type[i] == 2) {
if (a->char_size[a->type_index[i]] > max_char1)
max_char1 = a->char_size[a->type_index[i]];
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char1 = 8, i;
while (!field_names.empty()) {
i = a->columnNames[field_names.front()];
if (a->type[i] == 2) {
if (a->char_size[a->type_index[i]] > max_char1)
max_char1 = a->char_size[a->type_index[i]];
};
field_names.pop();
};
return max_char1;
};
size_t max_tmp(CudaSet* a)
{
size_t max_sz = 0;
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[i] == 0) {
if(int_size > max_sz)
max_sz = int_size;
}
else if(a->type[i] == 1) {
if(float_size > max_sz)
max_sz = float_size;
};
};
size_t m_char = max_char(a);
if(m_char > max_sz)
return m_char;
else
return max_sz;
};
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0, idx;
while(!cols.empty()) {
idx = a->columnNames[cols.front()];
if(a->type[idx] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[a->type_index[idx]];
cols.pop();
};
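// Rough sizing: if the table does not fit into about a third of free GPU memory, split it so
// each segment needs roughly a fifth of free memory. E.g. (hypothetical numbers) with 3 GB free
// and 40-byte rows, 100M rows (4 GB) give segCount = 4e9/(3e9/5) + 1 = 7 and maxRecs of about
// 14.3M rows per segment.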
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather((void*)permutation, RecCount, (void*)key, (void*)tmp, len);
// stable_sort the permuted keys and update the permutation
if (SortType.compare("DESC") == 0 )
str_sort(tmp, RecCount, permutation, 1, len);
else
str_sort(tmp, RecCount, permutation, 0, len);
}
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
hipMemcpy( (void*)tmp, (void*) key, RecCount*len, hipMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
void filter_op(char *s, char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
std::clock_t start1 = std::clock();
if(a->mRecCount == 0) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
size_t cnt = 0;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0)
b->prm_d.resize(a->maxRecs);
//cout << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, a, segment);
//cout << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
copyColumns(a, b->fil_value, segment, cnt);
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
if(segment == a->segCount-1)
b->type_index = a->type_index;
hipFree(res);
}
else {
setPrm(a,b,map_check,segment);
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
//cout << "filter res " << b->mRecCount << endl;
//std::cout<< "filter time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
}
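// Prepares the right-hand table of a join: loads the join column f2 (hashing it into an
// extra int column when it is a char column), and if the column is not already sorted,
// sorts it and gathers every other selected column through the same permutation so the host
// copies stay aligned with the new order.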
void sort_right(CudaSet* right, unsigned int colInd2, string f2, queue<string> op_g, queue<string> op_sel,
bool decimal_join, bool& str_join, size_t& rcount) {
size_t cnt_r = 0;
right->hostRecCount = right->mRecCount;
if (right->type[colInd2] == 2) {
str_join = 1;
right->d_columns_int.push_back(thrust::device_vector<int_type>());
for(unsigned int i = 0; i < right->segCount; i++) {
right->add_hashed_strings(f2, i, right->d_columns_int.size()-1);
};
cnt_r = right->d_columns_int[right->d_columns_int.size()-1].size();
};
// need to allocate all right columns
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, str_join, "", rcount, 0, right->segCount);
if(str_join) {
colInd2 = right->mColumnCount+1;
right->type_index[colInd2] = right->d_columns_int.size()-1;
};
//here we need to make sure that right column is ordered. If not then we order it and keep the permutation
bool sorted;
if(str_join || !decimal_join) {
sorted = thrust::is_sorted(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r);
}
else
sorted = thrust::is_sorted(right->d_columns_float[right->type_index[colInd2]].begin(), right->d_columns_float[right->type_index[colInd2]].begin() + cnt_r);
if(!sorted) {
thrust::device_ptr<unsigned int> v = thrust::device_malloc<unsigned int>(cnt_r);
thrust::sequence(v, v + cnt_r, 0, 1);
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, cnt_r*max_char(right)));
if(str_join) {
thrust::sort_by_key(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, v);
}
else {
thrust::sort_by_key(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, v);
};
if(!right->not_compressed) {
right->mRecCount = 0;
right->resize(cnt_r);
};
thrust::copy(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, right->h_columns_int[right->type_index[colInd2]].begin());
right->deAllocColumnOnDevice(colInd2);
unsigned int i;
while(!op_sel.empty()) {
if (right->columnNames.find(op_sel.front()) != right->columnNames.end()) {
i = right->columnNames[op_sel.front()];
if(i != colInd2) {
queue<string> op_alt1;
op_alt1.push(op_sel.front());
cnt_r = load_queue(op_alt1, right, str_join, "", rcount, 0, right->segCount, 0, 0);
if(right->type[i] == 0) {
thrust::device_ptr<int_type> d_tmp((int_type*)d);
thrust::gather(v, v+cnt_r, right->d_columns_int[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->h_columns_int[right->type_index[i]].begin());
}
else if(right->type[i] == 1) {
thrust::device_ptr<float_type> d_tmp((float_type*)d);
thrust::gather(v, v+cnt_r, right->d_columns_float[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->h_columns_float[right->type_index[i]].begin());
}
else {
thrust::device_ptr<char> d_tmp((char*)d);
str_gather(thrust::raw_pointer_cast(v), cnt_r, (void*)right->d_columns_char[right->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), right->char_size[right->type_index[i]]);
hipMemcpy( (void*)right->h_columns_char[right->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), cnt_r*right->char_size[right->type_index[i]], hipMemcpyDeviceToHost);
};
right->deAllocColumnOnDevice(i);
};
};
op_sel.pop();
};
thrust::device_free(v);
hipFree(d);
right->not_compressed = 1;
}
}
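// Loads the right-hand join column (and, for uncompressed sets, the remaining selected
// columns) for the given segment range. A char join column is hashed into an extra int
// column and colInd2 is redirected to it. Returns the number of records loaded.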
size_t load_right(CudaSet* right, unsigned int colInd2, string f2, queue<string> op_g, queue<string> op_sel,
queue<string> op_alt, bool decimal_join, bool& str_join,
size_t& rcount, unsigned int start_seg, unsigned int end_seg, bool rsz) {
size_t cnt_r = 0;
//right->hostRecCount = right->mRecCount;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
if (right->type[colInd2] == 2) {
str_join = 1;
right->d_columns_int.push_back(thrust::device_vector<int_type>());
for(unsigned int i = start_seg; i < end_seg; i++) {
right->add_hashed_strings(f2, i, right->d_columns_int.size()-1);
};
cnt_r = right->d_columns_int[right->d_columns_int.size()-1].size();
};
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, str_join, "", rcount, start_seg, end_seg, rsz, 1);
}
else {
cnt_r = load_queue(op_alt, right, str_join, f2, rcount, start_seg, end_seg, rsz, 1);
};
if(str_join) {
colInd2 = right->mColumnCount+1;
right->type_index[colInd2] = right->d_columns_int.size()-1;
};
if(right->not_compressed) {
queue<string> op_alt1;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (right->columnNames.find(op_alt.front()) != right->columnNames.end()) {
op_alt1.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt1.empty())
cnt_r = load_queue(op_alt1, right, str_join, "", rcount, start_seg, end_seg, 0, 0);
};
return cnt_r;
};
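// Estimates how many right-table segments can be processed per join pass by comparing the
// size of the selected right columns (plus the left join keys) with the currently free GPU
// memory, keeping roughly a 300 MB reserve.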
unsigned int calc_right_partition(CudaSet* left, CudaSet* right, queue<string> op_sel) {
unsigned int tot_size = left->maxRecs*8;
while(!op_sel.empty()) {
if (right->columnNames.find(op_sel.front()) != right->columnNames.end()) {
if(right->type[right->columnNames[op_sel.front()]] <= 1) {
tot_size = tot_size + right->maxRecs*8*right->segCount;
}
else {
tot_size = tot_size + right->maxRecs*
right->char_size[right->type_index[right->columnNames[op_sel.front()]]]*
right->segCount;
};
};
op_sel.pop();
};
return right->segCount / ((tot_size/(getFreeMem() - 300000000)) + 1);
};
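// Minimal decimal formatter used to build segment file names; assumes a non-negative input.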
string int_to_string(int number){
string number_string = "";
char ones_char;
int ones = 0;
while(true){
ones = number % 10;
switch(ones){
case 0: ones_char = '0'; break;
case 1: ones_char = '1'; break;
case 2: ones_char = '2'; break;
case 3: ones_char = '3'; break;
case 4: ones_char = '4'; break;
case 5: ones_char = '5'; break;
case 6: ones_char = '6'; break;
case 7: ones_char = '7'; break;
case 8: ones_char = '8'; break;
case 9: ones_char = '9'; break;
default : cout << ("Trouble converting number to string.");
}
number -= ones;
number_string = ones_char + number_string;
if(number == 0){
break;
}
number = number/10;
}
return number_string;
}
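// Appends the records of set s to set f. Disk-to-disk appends copy the segment files and
// rewrite the destination headers; in-memory appends resize and copy the host columns;
// memory-to-disk appends compress the data into new segments of the destination file.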
void insert_records(char* f, char* s) {
char buf[4096];
size_t size, maxRecs;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
cout << "couldn't find " << s << endl;
exit(0);
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
cout << "couldn't find " << f << endl;
exit(0);
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str_s = a->load_file_name + "." + int_to_string(a->cols[z]) + "." + int_to_string(i);
str_d = b->load_file_name + "." + int_to_string(b->cols[z]) + "." + int_to_string(b->segCount + i);
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int z = 0; z< a->mColumnCount; z++) {
b->reWriteHeader(b->load_file_name, b->cols[z], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[z] == 0) {
thrust::copy(a->h_columns_int[a->type_index[z]].begin(), a->h_columns_int[a->type_index[z]].begin() + a->mRecCount, b->h_columns_int[b->type_index[z]].begin() + oldCount);
}
else if(b->type[z] == 1) {
thrust::copy(a->h_columns_float[a->type_index[z]].begin(), a->h_columns_float[a->type_index[z]].begin() + a->mRecCount, b->h_columns_float[b->type_index[z]].begin() + oldCount);
}
else {
hipMemcpy(b->h_columns_char[b->type_index[z]] + b->char_size[b->type_index[z]]*oldCount, a->h_columns_char[a->type_index[z]], a->char_size[a->type_index[z]]*a->mRecCount, hipMemcpyHostToHost);
};
};
}
else if(!a->source && b->source) {
total_segments = b->segCount;
total_count = a->mRecCount;
total_max = process_count;
unsigned int segCount = (a->mRecCount/process_count + 1);
size_t offset = 0, mCount;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(a->mRecCount < process_count) {
mCount = a->mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = a->mRecCount - (segCount-1)*process_count;
};
a->compress(b->load_file_name, offset, 0, z - (segCount-1), mCount);
offset = offset + mCount;
};
//update headers
total_count = a->mRecCount + b->mRecCount;
//cout << "and now lets write " << a->mRecCount << " " << b->mRecCount << endl;
for(unsigned int i = 0; i< b->mColumnCount; i++) {
b->writeHeader(b->load_file_name, b->cols[i], total_segments);
};
};
};
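// Deletes the rows of a disk-based set that match the current filter expression. Each
// segment is zone-map checked and filtered on the GPU; segments with deletions are
// recompressed, untouched segments are renamed into place, surplus segment files are removed
// and the column headers are rewritten with the new record counts.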
void delete_records(char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
cout << "Delete operator is only applicable to disk based sets" << endl;
cout << "for deleting records from derived sets please use filter operator " << endl;
exit(0);
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for (map<string,unsigned int>::iterator it=a->columnNames.begin() ; it != a->columnNames.end(); ++it ) {
op_vx.push((*it).first);
};
allocColumns(a, op_vx);
a->prm_d.resize(a->maxRecs);
a->resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), not_identity<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
hipFree(res);
//cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
//cout << "rename " << i << " to " << new_seg_count << endl;
if(new_seg_count != i) {
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str_old = a->load_file_name + "." + int_to_string(a->cols[z]);
str_old += "." + int_to_string(i);
str = a->load_file_name + "." + int_to_string(a->cols[z]);
str += "." + int_to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str = a->load_file_name + "." + int_to_string(a->cols[z]);
str += "." + int_to_string(new_seg_count);
if(a->type[z] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[a->type_index[z]].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[a->type_index[z]], 0);
}
else if(a->type[z] == 1){
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[z]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[a->type_index[z]].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[a->type_index[z]], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[a->type_index[z]].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[a->type_index[z]].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[a->type_index[z]].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
void* t;
CUDA_SAFE_CALL(hipMalloc((void **) &t, tmp*a->char_size[a->type_index[z]]));
apply_permutation_char(a->d_columns_char[a->type_index[z]], (unsigned int*)thrust::raw_pointer_cast(a->prm_d.data()), tmp, (char*)t, a->char_size[a->type_index[z]]);
hipMemcpy(a->h_columns_char[a->type_index[z]], t, a->char_size[a->type_index[z]]*a->mRecCount, hipMemcpyDeviceToHost);
hipFree(t);
a->compress_char(str, z, a->mRecCount, 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
//cout << "rename " << i << " to " << new_seg_count << endl;
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str_old = a->load_file_name + "." + int_to_string(a->cols[z]);
str_old += "." + int_to_string(i);
str = a->load_file_name + "." + int_to_string(a->cols[z]);
str += "." + int_to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
//cout << "TOTAL REM " << totalRemoved << endl;
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str = a->load_file_name + "." + int_to_string(a->cols[z]);
str += "." + int_to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int z = 0; z< a->mColumnCount; z++) {
a->reWriteHeader(a->load_file_name, a->cols[z], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
hipFree(d);
};
};
| ce5406d5f04343880049e1c2f999e173c43e35fb.cu | /*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.cu"
#include "filter.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
unsigned int total_segments = 0;
unsigned int process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
void* d_v = NULL;
void* s_v = NULL;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<string> col_aliases;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string,string> setMap; //map to keep track of column names and set names
struct is_match
{
__host__ __device__
bool operator()(unsigned int x)
{
return x != 4294967295;
}
};
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
struct l_to_ui
{
__host__ __device__
float_type operator()(const int_type x)
{
return (unsigned int)x;
}
};
struct to_zero
{
__host__ __device__
bool operator()(const int_type x)
{
if(x == -1)
return 0;
else
return 1;
}
};
struct div_long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x, const float_type y)
{
return (float_type)x/y;
}
};
// trim from start
static inline std::string &ltrim(std::string &s) {
s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
return s;
}
// trim from end
static inline std::string &rtrim(std::string &s) {
s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
return s;
}
// trim from both ends
static inline std::string &trim(std::string &s) {
return ltrim(rtrim(s));
}
char *mystrtok(char **m,char *s,const char c)
{
char *p=s?s:*m;
if( !*p )
return 0;
*m=strchr(p,c);
if( *m )
*(*m)++=0;
else
*m=p+strlen(p);
return p;
}
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t max_tmp(CudaSet* a);
size_t getFreeMem();
char zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a, unsigned int segment);
void filter_op(char *s, char *f, unsigned int segment);
float total_time1 = 0;
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, queue<string> &references, queue<int> &references_nums)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, references, references_nums);
keep = false;
source = 1;
text_source = 1;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
keep = false;
source = 1;
text_source = 0;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::CudaSet(size_t RecordCount, unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::CudaSet(queue<string> op_sel, queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
grp = NULL;
fil_f = NULL;
fil_s = NULL;
grp_type = NULL;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(unsigned int colIndex, size_t RecordCount)
{
if (type[colIndex] == 0) {
d_columns_int[type_index[colIndex]].resize(RecordCount);
}
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(RecordCount);
else {
void* d;
size_t sz = RecordCount*char_size[type_index[colIndex]];
cudaError_t cudaStatus = cudaMalloc(&d, sz);
if(cudaStatus != cudaSuccess) {
cout << "Could not allocate " << sz << " bytes of GPU memory for " << RecordCount << " records " << endl;
exit(0);
};
d_columns_char[type_index[colIndex]] = (char*)d;
};
};
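// Reads one dictionary-compressed char segment from disk, hashes every dictionary entry
// with MurmurHash64A, decodes the bit-packed per-row indices on the GPU and gathers the
// resulting 64-bit hashes into d_columns_int[i_cnt] (honouring an active filter
// permutation). Used to turn string join keys into integers.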
void CudaSet::decompress_char_hash(unsigned int colIndex, unsigned int segment, size_t i_cnt)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
size_t old_count;
const unsigned int len = char_size[type_index[colIndex]];
string f1 = load_file_name + "." + int_to_string(cols[colIndex]) + "." + int_to_string(segment);
FILE* f;
f = fopen (f1.c_str() , "rb" );
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
unsigned long long int* hashes = new unsigned long long int[sz];
for(unsigned int i = 0; i < sz ; i++) {
hashes[i] = MurmurHash64A(&d_array[i*len], len, hash_seed); // 64-bit hash of each dictionary entry
};
void* d;
cudaMalloc((void **) &d, sz*int_size);
cudaMemcpy( d, (void *) hashes, sz*8, cudaMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> dd_int((unsigned long long int*)d);
delete[] d_array;
delete[] hashes;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
fclose(f);
void* d_val;
cudaMalloc((void **) &d_val, vals_count*8);
cudaMemcpy(d_val, (void *) int_array, vals_count*8, cudaMemcpyHostToDevice);
thrust::device_ptr<unsigned long long int> mval((unsigned long long int*)d_val);
delete[] int_array;
void* d_int;
cudaMalloc((void **) &d_int, real_count*4);
// convert bits to ints and then do gather
void* d_v1;
cudaMalloc((void **) &d_v1, 8);
thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1);
dd_v[1] = fit_count;
dd_v[0] = bits_encoded;
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)d_v1);
thrust::for_each(begin, begin + real_count, ff);
thrust::device_ptr<unsigned int> dd_val((unsigned int*)d_int);
if(filtered) {
if(prm_index == 'R') {
thrust::device_ptr<int_type> d_tmp = thrust::device_malloc<int_type>(real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_tmp);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::gather(prm_d.begin(), prm_d.begin() + mRecCount, d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + real_count);
thrust::gather(dd_val, dd_val + real_count, dd_int, d_columns_int[i_cnt].begin() + old_count);
};
cudaFree(d);
cudaFree(d_val);
cudaFree(d_v1);
cudaFree(d_int);
};
// takes a char column , hashes strings, copies them to a gpu
void CudaSet::add_hashed_strings(string field, unsigned int segment, size_t i_cnt)
{
unsigned int colInd2 = columnNames.find(field)->second;
CudaSet *t = varNames[setMap[field]];
if(not_compressed) { // decompressed strings on a host
size_t old_count;
unsigned long long int* hashes = new unsigned long long int[t->mRecCount];
for(unsigned int i = 0; i < t->mRecCount ; i++) {
hashes[i] = MurmurHash64A(t->h_columns_char[t->type_index[colInd2]] + i*t->char_size[t->type_index[colInd2]] + segment*t->maxRecs*t->char_size[t->type_index[colInd2]], t->char_size[t->type_index[colInd2]], hash_seed);
};
if(filtered) {
if(prm_index == 'R') {
thrust::device_ptr<unsigned long long int> d_tmp = thrust::device_malloc<unsigned long long int>(t->mRecCount);
thrust::copy(hashes, hashes+mRecCount, d_tmp);
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::gather(prm_d.begin(), prm_d.begin() + mRecCount, d_tmp, d_columns_int[i_cnt].begin() + old_count);
thrust::device_free(d_tmp);
}
else if(prm_index == 'A') {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
}
else {
old_count = d_columns_int[i_cnt].size();
d_columns_int[i_cnt].resize(old_count + mRecCount);
thrust::copy(hashes, hashes + mRecCount, d_columns_int[i_cnt].begin() + old_count);
}
delete [] hashes;
}
else { // hash the dictionary
decompress_char_hash(colInd2, segment, i_cnt);
};
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
bool prealloc = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
if (mRecCount > prealloc_char_size) {
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], mRecCount*char_size[type_index[i]]);
prealloc = 1;
};
}
else {
h_columns_char[type_index[i]] = new char[mRecCount*char_size[type_index[i]]];
};
};
};
if(prealloc)
prealloc_char_size = mRecCount;
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0) {
h_columns_int[type_index[i]].resize(mRecCount);
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(mRecCount);
}
else {
if (h_columns_char[type_index[i]]) {
h_columns_char[type_index[i]] = (char*)realloc(h_columns_char[type_index[i]], mRecCount*char_size[type_index[i]]);
}
else {
h_columns_char[type_index[i]] = new char[mRecCount*char_size[type_index[i]]];
};
};
};
};
void CudaSet::reserve(size_t Recs)
{
for(unsigned int i=0; i <mColumnCount; i++) {
if(type[i] == 0)
h_columns_int[type_index[i]].reserve(Recs);
else if(type[i] == 1)
h_columns_float[type_index[i]].reserve(Recs);
else {
h_columns_char[type_index[i]] = new char[Recs*char_size[type_index[i]]];
if(h_columns_char[type_index[i]] == NULL) {
cout << "Could not allocate on a host " << Recs << " records of size " << char_size[type_index[i]] << endl;
exit(0);
};
prealloc_char_size = Recs;
};
};
};
void CudaSet::deAllocColumnOnDevice(unsigned int colIndex)
{
if (type[colIndex] == 0 && !d_columns_int.empty()) {
if(d_columns_int[type_index[colIndex]].size() > 0) {
d_columns_int[type_index[colIndex]].resize(0);
d_columns_int[type_index[colIndex]].shrink_to_fit();
};
}
else if (type[colIndex] == 1 && !d_columns_float.empty()) {
if (d_columns_float[type_index[colIndex]].size() > 0) {
d_columns_float[type_index[colIndex]].resize(0);
d_columns_float[type_index[colIndex]].shrink_to_fit();
};
}
else if (type[colIndex] == 2 && d_columns_char[type_index[colIndex]] != NULL) {
cudaFree(d_columns_char[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = NULL;
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < mColumnCount; i++)
allocColumnOnDevice(i, RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < mColumnCount; i++)
deAllocColumnOnDevice(i);
for(unsigned int i=0; i < d_columns_int.size(); i++) {
if(d_columns_int[i].size() > 0) {
d_columns_int[i].resize(0);
d_columns_int[i].shrink_to_fit();
};
};
for(unsigned int i=0; i < d_columns_float.size(); i++) {
if(d_columns_float[i].size() > 0) {
d_columns_float[i].resize(0);
d_columns_float[i].shrink_to_fit();
};
};
if(grp) {
cudaFree(grp);
grp = NULL;
};
if(filtered) { // free the sources
string some_field;
map<string,unsigned int>::iterator it=columnNames.begin();
some_field = (*it).first;
if(setMap[some_field].compare(name)) {
CudaSet* t = varNames[setMap[some_field]];
t->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, unsigned int colIndex)
{
// if (RecCount) {
if (type[colIndex] == 0) {
d_columns_int[type_index[colIndex]].resize(mRecCount+RecCount);
}
else if (type[colIndex] == 1)
d_columns_float[type_index[colIndex]].resize(mRecCount+RecCount);
else {
if (d_columns_char[type_index[colIndex]] != NULL)
cudaFree(d_columns_char[type_index[colIndex]]);
void *d;
cudaMalloc((void **) &d, (mRecCount+RecCount)*char_size[type_index[colIndex]]);
d_columns_char[type_index[colIndex]] = (char*)d;
};
// };
};
void CudaSet::resizeDevice(size_t RecCount)
{
// if (RecCount)
for(unsigned int i=0; i < mColumnCount; i++)
resizeDeviceColumn(RecCount, i);
};
bool CudaSet::onDevice(unsigned int i)
{
size_t j = type_index[i];
if (type[i] == 0) {
if (d_columns_int.empty())
return 0;
if (d_columns_int[j].size() == 0)
return 0;
}
else if (type[i] == 1) {
if (d_columns_float.empty())
return 0;
if(d_columns_float[j].size() == 0)
return 0;
}
else if (type[i] == 2) {
if(d_columns_char.empty())
return 0;
if(d_columns_char[j] == NULL)
return 0;
};
return 1;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->ref_joins = ref_joins;
a->ref_sets = ref_sets;
a->ref_cols = ref_cols;
for ( map<string,unsigned int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
a->columnNames[(*it).first] = (*it).second;
for(unsigned int i=0; i < mColumnCount; i++) {
a->cols[i] = cols[i];
a->type[i] = type[i];
if(a->type[i] == 0) {
a->d_columns_int.push_back(thrust::device_vector<int_type>());
a->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
a->type_index[i] = a->d_columns_int.size()-1;
}
else if(a->type[i] == 1) {
a->d_columns_float.push_back(thrust::device_vector<float_type>());
a->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
a->type_index[i] = a->d_columns_float.size()-1;
a->decimal[i] = decimal[i];
}
else {
a->h_columns_char.push_back(NULL);
a->d_columns_char.push_back(NULL);
a->type_index[i] = a->d_columns_char.size()-1;
a->char_size.push_back(char_size[type_index[i]]);
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
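// Reads one on-disk segment of a column: int and float segments are loaded as raw
// compressed blocks into the host vectors (pfor_decompress unpacks them later on the GPU),
// while char segments are decoded right away by decompress_char.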
void CudaSet::readSegmentsFromFile(unsigned int segNum, unsigned int colIndex, size_t offset)
{
std::clock_t start1 = std::clock();
string f1(load_file_name);
f1 += "." + int_to_string(cols[colIndex]) + "." + int_to_string(segNum);
unsigned int cnt;
FILE* f;
f = fopen(f1.c_str(), "rb" );
if(f == NULL) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
size_t rr;
if(type[colIndex] == 0) {
if(1 > h_columns_int[type_index[colIndex]].size())
h_columns_int[type_index[colIndex]].resize(1);
fread(h_columns_int[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_int[type_index[colIndex]].data()))[0];
if(cnt > h_columns_int[type_index[colIndex]].size()/8 + 10)
h_columns_int[type_index[colIndex]].resize(cnt/8 + 10);
rr = fread((unsigned int*)(h_columns_int[type_index[colIndex]].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
cout << "Couldn't read " << cnt+52 << " bytes from " << f1 << " ,read only " << rr << endl;
exit(0);
};
}
else if(type[colIndex] == 1) {
if(1 > h_columns_float[type_index[colIndex]].size())
h_columns_float[type_index[colIndex]].resize(1);
fread(h_columns_float[type_index[colIndex]].data(), 4, 1, f);
cnt = ((unsigned int*)(h_columns_float[type_index[colIndex]].data()))[0];
if(cnt > h_columns_float[type_index[colIndex]].size()/8 + 10)
h_columns_float[type_index[colIndex]].resize(cnt/8 + 10);
rr = fread((unsigned int*)(h_columns_float[type_index[colIndex]].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
cout << "Couldn't read " << cnt+52 << " bytes from " << f1 << endl;
exit(0);
};
}
else {
decompress_char(f, colIndex, segNum, offset);
};
fclose(f);
//if(verbose)
// std::cout<< "read from file time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
};
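// Decodes a dictionary-compressed char segment produced by compress_char. Expected layout:
// 4 bytes dictionary size sz, sz*len bytes of fixed-width dictionary entries, then
// fit_count(4), bits_encoded(4), vals_count(4), real_count(4) and vals_count 64-bit words
// of bit-packed indices. The indices are unpacked on the GPU and the strings gathered into
// the column buffer (or into alloced_tmp when alloced_switch is set).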
void CudaSet::decompress_char(FILE* f, unsigned int colIndex, unsigned int segNum, size_t offset)
{
unsigned int bits_encoded, fit_count, sz, vals_count, real_count;
const unsigned int len = char_size[type_index[colIndex]];
fread(&sz, 4, 1, f);
char* d_array = new char[sz*len];
fread((void*)d_array, sz*len, 1, f);
void* d;
cudaMalloc((void **) &d, sz*len);
cudaMemcpy( d, (void *) d_array, sz*len, cudaMemcpyHostToDevice);
delete[] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
thrust::device_ptr<unsigned int> param = thrust::device_malloc<unsigned int>(2);
param[1] = fit_count;
param[0] = bits_encoded;
unsigned long long int* int_array = new unsigned long long int[vals_count];
fread((void*)int_array, 1, vals_count*8, f);
//fclose(f);
void* d_val;
cudaMalloc((void **) &d_val, vals_count*8);
cudaMemcpy(d_val, (void *) int_array, vals_count*8, cudaMemcpyHostToDevice);
delete[] int_array;
void* d_int;
cudaMalloc((void **) &d_int, real_count*4);
thrust::counting_iterator<unsigned int> begin(0);
decompress_functor_str ff((unsigned long long int*)d_val,(unsigned int*)d_int, (unsigned int*)thrust::raw_pointer_cast(param));
thrust::for_each(begin, begin + real_count, ff);
if(!alloced_switch)
str_gather(d_int, real_count, d, d_columns_char[type_index[colIndex]] + offset*len, len);
else
str_gather(d_int, real_count, d, alloced_tmp, len);
mRecCount = real_count;
cudaFree(d);
cudaFree(d_val);
thrust::device_free(param);
cudaFree(d_int);
}
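// Copies one segment of a column to the GPU: uncompressed sets are copied straight from the
// host vectors, compressed sets are read from disk and decompressed (decimal floats are also
// converted from their scaled integer form). When alloced_switch is set the data goes into
// the scratch buffer alloced_tmp instead of the column's device vector.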
void CudaSet::CopyColumnToGpu(unsigned int colIndex, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
switch(type[colIndex]) {
case 0 :
if(!alloced_switch)
thrust::copy(h_columns_int[type_index[colIndex]].begin() + maxRecs*segment, h_columns_int[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_columns_int[type_index[colIndex]].begin() + offset);
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[type_index[colIndex]].begin() + maxRecs*segment, h_columns_int[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_col);
};
break;
case 1 :
if(!alloced_switch) {
thrust::copy(h_columns_float[type_index[colIndex]].begin() + maxRecs*segment, h_columns_float[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_columns_float[type_index[colIndex]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[type_index[colIndex]].begin() + maxRecs*segment, h_columns_float[type_index[colIndex]].begin() + maxRecs*segment + mRecCount, d_col);
};
break;
default :
if(!alloced_switch) {
cudaMemcpy(d_columns_char[type_index[colIndex]] + char_size[type_index[colIndex]]*offset, h_columns_char[type_index[colIndex]] + maxRecs*segment*char_size[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
}
else
cudaMemcpy(alloced_tmp , h_columns_char[type_index[colIndex]] + maxRecs*segment*char_size[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
};
}
else {
readSegmentsFromFile(segment,colIndex, offset);
if(type[colIndex] != 2) {
if(d_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
};
if(type[colIndex] == 0) {
if(!alloced_switch) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + offset), h_columns_int[type_index[colIndex]].data(), d_v, s_v);
}
else {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[type_index[colIndex]].data(), d_v, s_v);
};
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
if(!alloced_switch) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + offset) , h_columns_float[type_index[colIndex]].data(), d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin(), long_to_float());
}
else {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[type_index[colIndex]].data(), d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
}
//else // uncompressed float
//cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(unsigned int colIndex) // copy all segments
{
if(not_compressed) {
switch(type[colIndex]) {
case 0 :
thrust::copy(h_columns_int[type_index[colIndex]].begin(), h_columns_int[type_index[colIndex]].begin() + mRecCount, d_columns_int[type_index[colIndex]].begin());
break;
case 1 :
thrust::copy(h_columns_float[type_index[colIndex]].begin(), h_columns_float[type_index[colIndex]].begin() + mRecCount, d_columns_float[type_index[colIndex]].begin());
break;
default :
cudaMemcpy(d_columns_char[type_index[colIndex]], h_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*mRecCount, cudaMemcpyHostToDevice);
};
}
else {
size_t totals = 0;
if(d_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(s_v == NULL)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
size_t cnt = 0;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colIndex, cnt);
if(type[colIndex] == 0) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data() + totals), h_columns_int[type_index[colIndex]].data(), d_v, s_v);
}
else if(type[colIndex] == 1) {
if(decimal[colIndex]) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totals) , h_columns_float[type_index[colIndex]].data(), d_v, s_v);
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data() + totals));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[type_index[colIndex]].begin() + totals, long_to_float());
}
// else uncompressed float
//cudaMemcpy( d_columns[colIndex], (void *) ((float_type*)h_columns[colIndex] + offset), count*float_size, cudaMemcpyHostToDevice);
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
totals = totals + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = totals;
};
}
void CudaSet::CopyColumnToHost(int colIndex, size_t offset, size_t RecCount)
{
switch(type[colIndex]) {
case 0 :
thrust::copy(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + RecCount, h_columns_int[type_index[colIndex]].begin() + offset);
break;
case 1 :
thrust::copy(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + RecCount, h_columns_float[type_index[colIndex]].begin() + offset);
break;
default :
cudaMemcpy(h_columns_char[type_index[colIndex]] + offset*char_size[type_index[colIndex]], d_columns_char[type_index[colIndex]], char_size[type_index[colIndex]]*RecCount, cudaMemcpyDeviceToHost);
}
}
void CudaSet::CopyColumnToHost(int colIndex)
{
CopyColumnToHost(colIndex, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < mColumnCount; i++) {
CopyColumnToHost(i, offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(d_columns_int[type_index[colIndex]].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_float[type_index[colIndex]].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
unsigned int colIndex = columnNames.find(name)->second;
return thrust::raw_pointer_cast(h_columns_int[type_index[colIndex]].data());
}
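// Marks group boundaries for the grouping columns on the stack: grp[i] is set when row i is
// the last row of its group, i.e. when any grouping column changes value between rows i and
// i+1 (char columns are compared through their hashed int copies). grp_count receives the
// number of groups.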
void CudaSet::GroupBy(stack<string> columnRef, unsigned int int_col_count)
{
unsigned int colIndex;
if(grp)
cudaFree(grp);
CUDA_SAFE_CALL(cudaMalloc((void **) &grp, mRecCount * sizeof(bool)));
thrust::device_ptr<bool> d_grp(grp);
thrust::sequence(d_grp, d_grp+mRecCount, 0, 0);
thrust::device_ptr<bool> d_group = thrust::device_malloc<bool>(mRecCount);
d_group[mRecCount-1] = 1;
unsigned int i_count = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
columnGroups.push(columnRef.top()); // save for future references
colIndex = columnNames[columnRef.top()];
if (type[colIndex] == 0) { // int_type
thrust::transform(d_columns_int[type_index[colIndex]].begin(), d_columns_int[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_int[type_index[colIndex]].begin()+1, d_group, thrust::not_equal_to<int_type>());
}
else if (type[colIndex] == 1) { // float_type
thrust::transform(d_columns_float[type_index[colIndex]].begin(), d_columns_float[type_index[colIndex]].begin() + mRecCount - 1,
d_columns_float[type_index[colIndex]].begin()+1, d_group, f_not_equal_to());
}
else { // Char
//str_grp(d_columns_char[type_index[colIndex]], mRecCount, d_group, char_size[type_index[colIndex]]);
//use int_type
thrust::transform(d_columns_int[int_col_count+i_count].begin(), d_columns_int[int_col_count+i_count].begin() + mRecCount - 1,
d_columns_int[int_col_count+i_count].begin()+1, d_group, thrust::not_equal_to<int_type>());
i_count++;
};
thrust::transform(d_group, d_group+mRecCount, d_grp, d_grp, thrust::logical_or<bool>());
};
thrust::device_free(d_group);
grp_count = thrust::count(d_grp, d_grp+mRecCount,1);
};
void CudaSet::addDeviceColumn(int_type* col, int colIndex, string colName, size_t recCount)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 0;
d_columns_int.push_back(thrust::device_vector<int_type>(recCount));
h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
type_index[colIndex] = d_columns_int.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_int[type_index[colIndex]].size() < recCount) {
d_columns_int[type_index[colIndex]].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[type_index[colIndex]].begin());
};
void CudaSet::addDeviceColumn(float_type* col, int colIndex, string colName, size_t recCount, bool is_decimal)
{
if (columnNames.find(colName) == columnNames.end()) {
columnNames[colName] = colIndex;
type[colIndex] = 1;
d_columns_float.push_back(thrust::device_vector<float_type>(recCount));
h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
type_index[colIndex] = d_columns_float.size()-1;
}
else { // already exists, may need to resize it
if(d_columns_float[type_index[colIndex]].size() < recCount)
d_columns_float[type_index[colIndex]].resize(recCount);
};
decimal[colIndex] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[type_index[colIndex]].begin());
};
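// Writes mCount host records starting at 'offset' into compressed segment files named
// <file_name>.<column>.<segment>. If an ORDER BY is active the data is first sorted on the
// GPU through a permutation, and the batch may be split into partition_count partitions.
// Int and decimal columns are pfor-compressed, plain floats are written raw, char columns
// are dictionary-compressed.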
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!op_sort.empty()) { //sort the segment
//copy the key columns to device
queue<string> sf(op_sort);
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*max_char(this, sf)));
string sort_type = "ASC";
while(!sf.empty()) {
int colInd = columnNames[sf.front()];
allocColumnOnDevice(colInd, maxRecs);
CopyColumnToGpu(colInd);
if (type[colInd] == 0)
update_permutation(d_columns_int[type_index[colInd]], raw_ptr, mRecCount, sort_type, (int_type*)temp);
else if (type[colInd] == 1)
update_permutation(d_columns_float[type_index[colInd]], raw_ptr, mRecCount, sort_type, (float_type*)temp);
else {
update_permutation_char(d_columns_char[type_index[colInd]], raw_ptr, mRecCount, sort_type, (char*)temp, char_size[type_index[colInd]]);
};
deAllocColumnOnDevice(colInd);
sf.pop();
};
cudaFree(temp);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
unsigned int old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i< mColumnCount; i++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
new_offset = 0;
if(!op_sort.empty()) {
allocColumnOnDevice(i, maxRecs);
CopyColumnToGpu(i);
};
if(type[i] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[type_index[i]].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[type_index[i]], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[type_index[i]], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_int[type_index[i]].begin() + offset, h_columns_int[type_index[i]].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[type_index[i]], 0);
};
}
else if(type[i] == 1) {
if(decimal[i]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[type_index[i]].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[type_index[i]], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[type_index[i]], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[type_index[i]].begin() + offset, h_columns_float[type_index[i]].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[type_index[i]], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[type_index[i]].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[type_index[i]].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[type_index[i]].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[type_index[i]]*mRecCount];
apply_permutation_char_host(h_columns_char[type_index[i]], h_permutation, mRecCount, t, char_size[type_index[i]]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[type_index[i]]*mRecCount, h_columns_char[type_index[i]]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + int_to_string(cols[i]);
curr_file = str;
str += "." + int_to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, i, partition_recs, new_offset);
else
compress_char(str, i, mCount - partition_recs*p, new_offset);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else
compress_char(str, i, mCount, offset);
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, cols[i], total_segments-1);
else {
writeHeader(file_name, cols[i], total_segments);
};
};
total_segments = old_segments;
};
cudaFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
void CudaSet::writeHeader(string file_name, unsigned int col, unsigned int tot_segs) {
string str = file_name + "." + int_to_string(col);
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, unsigned int col, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + int_to_string(col);
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
idx = cols[columnNames[os.front()]];
if(verbose)
cout << "sorted on " << idx << endl;
binary_file.write((char *)&idx, 4);
os.pop();
};
binary_file.close();
}
else if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = cols[columnNames[os.front()]];
binary_file.write((char *)&idx, 4);
os.pop();
};
binary_file.close();
};
}
using namespace mgpu;
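// Writes the result set either as delimited text (to a file, or to stdout when 'term' is
// set, honouring the LIMIT value) or as a compressed binary table. When loading from a text
// source it also records per-segment join match counts against the REFERENCES sets (the
// .refs files) before compressing the data.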
void CudaSet::Store(string file_name, char* sep, unsigned int limit, bool binary, bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int i = 0; i< mColumnCount; i++) {
writeHeader(file_name, cols[i], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
//cout << "mCount " << mCount << " " << mRecCount << endl;
if(binary == 0) {
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (file_pr == NULL)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
char buffer [33];
string ss;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
for (map<string,unsigned int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it )
op_vx.push((*it).first);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < mColumnCount; j++) {
if (type[j] == 0) {
sprintf(buffer, "%lld", (h_columns_int[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else if (type[j] == 1) {
sprintf(buffer, "%.2f", (h_columns_float[type_index[j]])[i] );
fputs(buffer,file_pr);
fputs(sep, file_pr);
}
else {
ss.assign(h_columns_char[type_index[j]] + (i*char_size[type_index[j]]), char_size[type_index[j]]);
trim(ss);
fputs(ss.c_str(), file_pr);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
}
else if(text_source) { //writing a binary file using a text file as a source
// time to perform join checks on REFERENCES dataset segments
for(unsigned int i = 0; i< mColumnCount; i++) {
if(ref_sets.find(i) != ref_sets.end()) {
string f1 = file_name + "." + int_to_string(i) + ".refs";
fstream f_file;
if(total_segments == 0) {
f_file.open(f1.c_str(), ios::out|ios::trunc|ios::binary);
unsigned int len = ref_sets[i].size();
f_file.write((char *)&len, 4);
f_file.write(ref_sets[i].c_str(), len);
f_file.write((char *)&ref_cols[i], 4);
}
else {
f_file.open(f1.c_str(), ios::out|ios::app|ios::binary);
};
f1 = ref_sets[i] + "." + int_to_string(ref_cols[i]) + ".header";
FILE* ff = fopen(f1.c_str(), "rb");
if(ff == NULL) {
cout << "Couldn't open file " << f1 << endl;
exit(0);
};
unsigned int ref_segCount, ref_maxRecs;
fread((char *)&ref_segCount, 4, 1, ff); // skip the low 4 bytes of the 8-byte record count
fread((char *)&ref_segCount, 4, 1, ff); // skip the high 4 bytes of the 8-byte record count
fread((char *)&ref_segCount, 4, 1, ff); // segment count
fread((char *)&ref_maxRecs, 4, 1, ff);  // max records per segment
fclose(ff);
//cout << "CALC " << i << " " << ref_sets[i] << " " << ref_cols[i] << " " << ref_segCount << endl;
CudaSet* a = new CudaSet(maxRecs, 1);
a->h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
a->d_columns_int.push_back(thrust::device_vector<int_type>(ref_maxRecs));
a->type[0] = 0;
a->type_index[0] = 0;
a->not_compressed = 0;
a->load_file_name = ref_sets[i];
a->cols[0] = 1;
MGPU_MEM(int) aIndicesDevice, bIndicesDevice;
size_t res_count;
if(!onDevice(i)) {
allocColumnOnDevice(i, maxRecs);
};
CopyColumnToGpu(i);
thrust::sort(d_columns_int[type_index[i]].begin(), d_columns_int[type_index[i]].begin() + mRecCount);
f_file.write((char *)&total_segments, 4);
f_file.write((char *)&ref_segCount, 4);
for(unsigned int z = 0; z < ref_segCount; z++) {
a->CopyColumnToGpu(0, z, 0);
thrust::sort(a->d_columns_int[0].begin(), a->d_columns_int[0].begin() + a->mRecCount);
// check if there is a join result
//cout << "join " << mRecCount << " " << a->mRecCount << endl;
res_count = RelationalJoin<MgpuJoinKindInner>(thrust::raw_pointer_cast(d_columns_int[type_index[i]].data()), mRecCount,
thrust::raw_pointer_cast(a->d_columns_int[0].data()), a->mRecCount,
&aIndicesDevice, &bIndicesDevice,
mgpu::less<int_type>(), *context);
//cout << "RES " << i << " " << total_segments << ":" << z << " " << res_count << endl;
f_file.write((char *)&z, 4);
f_file.write((char *)&res_count, 8);
};
f_file.close();
a->deAllocColumnOnDevice(0);
a->free();
};
};
compress(file_name, 0, 1, 0, mCount);
for(unsigned int i = 0; i< mColumnCount; i++)
if(type[i] == 2)
deAllocColumnOnDevice(i);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for (map<string,unsigned int>::iterator it=columnNames.begin() ; it != columnNames.end(); ++it ) {
op_vx.push((*it).first);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount);
offset = offset + mCount;
};
};
};
}
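// dictionary-encodes a char column: builds an ordered dictionary of the distinct
// values seen in rows [offset, offset+mCount), then bit-packs the per-row codes into
// 64-bit words (fit_count codes per word) and writes dictionary + packed codes to file_name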
void CudaSet::compress_char(string file_name, unsigned int index, size_t mCount, size_t offset)
{
std::map<string,unsigned int> dict;
std::vector<string> dict_ordered;
std::vector<unsigned int> dict_val;
map<string,unsigned int>::iterator iter;
unsigned int bits_encoded, ss;
unsigned int len = char_size[type_index[index]];
for (unsigned int i = 0 ; i < mCount; i++) {
string f(h_columns_char[type_index[index]] + (i+offset)*len, len);
if((iter = dict.find(f)) != dict.end()) {
dict_val.push_back(iter->second);
}
else {
ss = (unsigned int)dict.size();
dict[f] = ss;
dict_val.push_back(ss);
dict_ordered.push_back(f);
};
};
bits_encoded = (unsigned int)ceil(log2(double(dict.size()+1)));
char *cc = new char[len+1];
unsigned int sz = (unsigned int)dict_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary);
binary_file.write((char *)&sz, 4);
for(unsigned int i = 0; i < dict_ordered.size(); i++) {
memset(&cc[0], 0, len);
strcpy(cc,dict_ordered[i].c_str());
binary_file.write(cc, len);
};
delete [] cc;
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, 8);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
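// reads up to process_count delimited lines from file_p into the host columns,
// converting each mapped field by column type (0 = int, 1 = float, 2 = char);
// returns true once the end of the file has been reached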
bool CudaSet::LoadBigFile(FILE* file_p)
{
char line[1000];
unsigned int current_column, count = 0, index;
char *p,*t;
const char* sep = separator.c_str();
map<unsigned int,unsigned int> col_map;
for(unsigned int i = 0; i < mColumnCount; i++) {
col_map[cols[i]] = i;
};
while (count < process_count && fgets(line, 1000, file_p) != NULL) {
strtok(line, "\n");
current_column = 0;
for(t=mystrtok(&p,line,*sep); t; t=mystrtok(&p,0,*sep)) {
current_column++;
if(col_map.find(current_column) == col_map.end()) {
continue;
};
index = col_map[current_column];
if (type[index] == 0) {
if (strchr(t,'-') == NULL) {
(h_columns_int[type_index[index]])[count] = atoll(t);
}
else { // handling possible dates
memmove(t+4,t+5,2); // source and destination overlap, so memmove rather than strncpy
memmove(t+6,t+8,2);
t[8] = '\0';
(h_columns_int[type_index[index]])[count] = atoll(t);
};
}
else if (type[index] == 1) {
(h_columns_float[type_index[index]])[count] = atoff(t);
}
else {//char
strcpy(h_columns_char[type_index[index]] + count*char_size[type_index[index]], t);
}
};
count++;
};
mRecCount = count;
if(count < process_count) {
fclose(file_p);
return 1;
}
else
return 0;
};
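// releases host-side column storage and per-column metadata arrays,
// then frees any device-side allocations via deAllocOnDevice()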
void CudaSet::free() {
for(unsigned int i = 0; i < mColumnCount; i++ ) {
if(type[i] == 2 && h_columns_char[type_index[i]]) {
delete [] h_columns_char[type_index[i]];
h_columns_char[type_index[i]] = NULL;
}
else {
if(type[i] == 0 ) {
h_columns_int[type_index[i]].resize(0);
h_columns_int[type_index[i]].shrink_to_fit();
}
else if(type[i] == 1) {
h_columns_float[type_index[i]].resize(0);
h_columns_float[type_index[i]].shrink_to_fit();
};
}
};
prm_d.resize(0);
prm_d.shrink_to_fit();
deAllocOnDevice();
//cout << "dealloced " << name << " " << getFreeMem() << endl;
delete [] type;
delete [] decimal;
if(grp_type)
delete [] grp_type;
delete [] cols;
if(fil_s)
delete fil_s;
if(fil_f)
delete fil_f;
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
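// the compare() overloads below evaluate a predicate element-wise and return a
// device array of bools; op_type encodes the operator: 1 '<', 2 '>', 4 '=',
// 5 '<=', 6 '>=', anything else '!='; float comparisons use an EPSILON tolerance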
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s) res = 1;
else res = 0;
else if (op_type == 1) // <
if(d<s) res = 1;
else res = 0;
else if (op_type == 6) // >=
if(d>=s) res = 1;
else res = 0;
else if (op_type == 5) // <=
if(d<=s) res = 1;
else res = 0;
else if (op_type == 4)// =
if(d==s) res = 1;
else res = 0;
else // !=
if(d!=s) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON) res = 1;
else res = 0;
else if (op_type == 1) // <
if ((s-d) > EPSILON) res = 1;
else res = 0;
else if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
else if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON)) res = 1;
else res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
else res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
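// the op() overloads below perform element-wise arithmetic ("MUL", "ADD", "MINUS",
// anything else divides) into newly allocated device memory; reverse != 0 swaps
// the operand order, which matters for MINUS and division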
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // in-place transformation
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
}
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, int reverse)
{
thrust::device_ptr<int_type> temp = thrust::device_malloc<int_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<int_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
};
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,int reverse)
{
thrust::device_ptr<float_type> temp = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
return thrust::raw_pointer_cast(temp);
}
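// builds column metadata for a disk-based compressed table: reads the optional
// .sort/.presort descriptors, per-column headers and .refs join metadata, and
// creates empty host/device vectors according to each column type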
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
FILE* f;
string f1;
unsigned int cnt, bytes;
prealloc_char_size = 0;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f != NULL) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
sorted_fields.push(idx);
if(verbose)
cout << "segment sorted on " << idx << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f != NULL) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
presorted_fields.push(idx);
if(verbose)
cout << "presorted on " << idx << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
f1 = file_name + "." + int_to_string(colsRef.front()) + ".0";
f = fopen (f1.c_str() , "rb" );
fread((char *)&bytes, 4, 1, f);
fclose(f);
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + int_to_string(colsRef.front()) + ".0";
f = fopen (f1.c_str() , "rb" );
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
//check the references
f1 = file_name + "." + int_to_string(colsRef.front()-1) + ".refs";
f = fopen (f1.c_str() , "rb" );
if(f != NULL) {
unsigned int len;
fread(&len, 4, 1, f);
char* array = new char[len];
fread((void*)array, len, 1, f);
ref_sets[i] = array;
delete [] array;
unsigned int z, segs, seg_num, curr_seg;
size_t res_count;
fread((void*)&z, 4, 1, f);
ref_cols[i] = z;
unsigned int bytes_read = fread((void*)&curr_seg, 4, 1, f);
while(bytes_read == 1) {
fread((void*)&segs, 4, 1, f); //ref seg count
//cout << "for " << i << " read " << array << " and " << z << " " << segs << endl;
for(unsigned int j = 0; j < segs; j++) {
fread((void*)&seg_num, 4, 1, f);
fread((void*)&res_count, 8, 1, f);
//cout << "curr_seg " << curr_seg << " " << seg_num << " " << res_count << endl;
if(res_count)
ref_joins[i][curr_seg].insert(seg_num);
else
ref_joins[i][curr_seg].insert(std::numeric_limits<unsigned int>::max());
};
bytes_read = fread((void*)&curr_seg, 4, 1, f);
};
fclose(f);
};
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
//h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >(bytes/8 + 10));
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
//cout << "creating " << name << " " << nameRef.front() << " " << bytes/8 + 10 << endl;
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
//h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >(bytes/8 + 10));
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type >());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
//h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >(bytes/8 + 10));
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, queue<string> &references, queue<int> &references_nums)
{
mColumnCount = (unsigned int)nameRef.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
prealloc_char_size = 0;
tmp_table = 0;
filtered = 0;
mRecCount = Recs;
hostRecCount = Recs;
segCount = 1;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames[nameRef.front()] = i;
cols[i] = colsRef.front();
if ((typeRef.front()).compare("int") == 0) {
type[i] = 0;
decimal[i] = 0;
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
d_columns_int.push_back(thrust::device_vector<int_type>());
type_index[i] = h_columns_int.size()-1;
}
else if ((typeRef.front()).compare("float") == 0) {
type[i] = 1;
decimal[i] = 0;
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[i] = 1;
decimal[i] = 1;
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
d_columns_float.push_back(thrust::device_vector<float_type>());
type_index[i] = h_columns_float.size()-1;
}
else {
type[i] = 2;
decimal[i] = 0;
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
char_size.push_back(sizeRef.front());
type_index[i] = h_columns_char.size()-1;
};
if(!references.front().empty()) {
ref_sets[i] = references.front();
ref_cols[i] = references_nums.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
references.pop();
references_nums.pop();
};
};
void CudaSet::initialize(size_t RecordCount, unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
prealloc_char_size = 0;
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
filtered = 0;
for(unsigned int i =0; i < mColumnCount; i++) {
cols[i] = i;
};
};
void CudaSet::initialize(queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
prealloc_char_size = 0;
unsigned int index;
unsigned int i = 0;
while(!op_sel.empty()) {
if(!setMap.count(op_sel.front())) {
cout << "coudn't find column " << op_sel.front() << endl;
exit(0);
};
CudaSet* a = varNames[setMap[op_sel.front()]];
if(i == 0)
maxRecs = a->maxRecs;
index = a->columnNames[op_sel.front()];
cols[i] = i;
decimal[i] = a->decimal[i];
columnNames[op_sel.front()] = i;
if (a->type[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(a->char_size[a->type_index[index]]);
};
i++;
op_sel.pop();
};
}
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if(a->columnNames.find(q_cnt.front()) != a->columnNames.end() || b->columnNames.find(q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
type = new unsigned int[mColumnCount];
cols = new unsigned int[mColumnCount];
decimal = new bool[mColumnCount];
maxRecs = b->maxRecs;
map<string,unsigned int>::iterator it;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
prealloc_char_size = 0;
unsigned int index;
i = 0;
while(!op_sel.empty() && (columnNames.find(op_sel.front()) == columnNames.end())) {
if((it = a->columnNames.find(op_sel.front())) != a->columnNames.end()) {
index = it->second;
cols[i] = i;
decimal[i] = a->decimal[i];
columnNames[op_sel.front()] = i;
if (a->type[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((a->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(a->char_size[a->type_index[index]]);
};
i++;
}
else if((it = b->columnNames.find(op_sel.front())) != b->columnNames.end()) {
index = it->second;
columnNames[op_sel.front()] = i;
cols[i] = i;
decimal[i] = b->decimal[index];
if ((b->type)[index] == 0) {
d_columns_int.push_back(thrust::device_vector<int_type>());
h_columns_int.push_back(thrust::host_vector<int_type, pinned_allocator<int_type> >());
type[i] = 0;
type_index[i] = h_columns_int.size()-1;
}
else if ((b->type)[index] == 1) {
d_columns_float.push_back(thrust::device_vector<float_type>());
h_columns_float.push_back(thrust::host_vector<float_type, pinned_allocator<float_type> >());
type[i] = 1;
type_index[i] = h_columns_float.size()-1;
}
else {
h_columns_char.push_back(NULL);
d_columns_char.push_back(NULL);
type[i] = 2;
type_index[i] = h_columns_char.size()-1;
char_size.push_back(b->char_size[b->type_index[index]]);
};
i++;
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 5;
else if (op_type == 1) // <
return 6;
else if (op_type == 6) // >=
return 1;
else if (op_type == 5) // <=
return 2;
else return op_type;
}
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
} ;
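// for a filtered set this (re)allocates a single scratch buffer large enough for
// the widest column of one segment; otherwise it pre-allocates device storage for
// every requested column that is not already resident on the GPU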
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
size_t max_sz = max_tmp(a) ;
CudaSet* t = varNames[setMap[fields.front()]];
if(max_sz*t->maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, max_sz*t->maxRecs);
alloced_sz = max_sz*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(setMap.count(fields.front()) > 0) {
unsigned int idx = a->columnNames[fields.front()];
bool onDevice = 0;
if(a->type[idx] == 0) {
if(a->d_columns_int[a->type_index[idx]].size() > 0) {
onDevice = 1;
}
}
else if(a->type[idx] == 1) {
if(a->d_columns_float[a->type_index[idx]].size() > 0) {
onDevice = 1;
};
}
else {
if((a->d_columns_char[a->type_index[idx]]) != NULL) {
onDevice = 1;
};
};
if (!onDevice) {
a->allocColumnOnDevice(idx, a->maxRecs);
}
}
fields.pop();
};
};
}
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
unsigned int tindex = t->columnNames[field];
unsigned int idx = a->columnNames[field];
if(!a->onDevice(idx)) {
a->allocColumnOnDevice(idx, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(tindex, idx, a, t, count, a->mRecCount);
}
else {
mycopy(tindex, idx, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
size_t getSegmentRecCount(CudaSet* a, unsigned int segment) {
if (segment == a->segCount-1) {
return a->hostRecCount - a->maxRecs*segment;
}
else
return a->maxRecs;
}
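// copies (or gathers, when the set is filtered) one segment of every requested
// column onto the device, optionally running the filter and resizing device
// storage first; count tracks the running output offset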
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz) {
a->resizeDevice(count);
a->devRecCount = count+a->mRecCount;
};
};
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && setMap.count(fields.front()) > 0) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[setMap[fields.front()]];
alloced_switch = 1;
t->CopyColumnToGpu(t->columnNames[fields.front()], segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
a->orig_segs[t->load_file_name].insert(segment);
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(a->columnNames[fields.front()], segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
}
void setPrm(CudaSet* a, CudaSet* b, char val, unsigned int segment) {
b->prm_index = val;
if (val == 'A') {
b->mRecCount = getSegmentRecCount(a,segment);
}
else if (val == 'N') {
b->mRecCount = 0;
}
}
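// mygather()/mycopy() move one segment of a source column into the destination set
// at the given offset: mygather applies the permutation in a->prm_d (filtered rows),
// mycopy copies the segment verbatim; when alloced_switch is set the source data
// lives in the shared scratch buffer alloced_tmp instead of t's device vectors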
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_int[t->type_index[tindex]].begin(), a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
t->d_columns_float[t->type_index[tindex]].begin(), a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size,
d_col, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
(void*)t->d_columns_char[t->type_index[tindex]], (void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (unsigned int)a->char_size[a->type_index[idx]] );
}
else {
str_gather((void*)thrust::raw_pointer_cast(a->prm_d.data()), g_size,
alloced_tmp, (void*)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (unsigned int)a->char_size[a->type_index[idx]] );
};
}
};
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[tindex] == 0) {
if(!alloced_switch) {
thrust::copy(t->d_columns_int[t->type_index[tindex]].begin(), t->d_columns_int[t->type_index[tindex]].begin() + g_size,
a->d_columns_int[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[a->type_index[idx]].begin() + offset);
};
}
else if(t->type[tindex] == 1) {
if(!alloced_switch) {
thrust::copy(t->d_columns_float[t->type_index[tindex]].begin(), t->d_columns_float[t->type_index[tindex]].begin() + g_size,
a->d_columns_float[a->type_index[idx]].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[a->type_index[idx]].begin() + offset);
};
}
else {
if(!alloced_switch) {
cudaMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), (void**)t->d_columns_char[t->type_index[tindex]],
g_size*t->char_size[t->type_index[tindex]], cudaMemcpyDeviceToDevice);
}
else {
cudaMemcpy((void**)(a->d_columns_char[a->type_index[idx]] + offset*a->char_size[a->type_index[idx]]), alloced_tmp,
g_size*t->char_size[t->type_index[tindex]], cudaMemcpyDeviceToDevice);
};
};
};
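// loads the requested columns of right (plus the join column f2 unless this is a
// string join) for segments [start_segment, end_segment) and returns the total
// number of rows copied to the device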
size_t load_queue(queue<string> c1, CudaSet* right, bool str_join, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(right->columnNames.find(c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() || str_join) {
cc.push(c1.front());
};
};
c1.pop();
};
if(!str_join && right->columnNames.find(f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
rcount = right->maxRecs;
}
else
rcount = right->mRecCount;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(right->columnNames[ct.front()], rcount);
};
ct.pop();
};
size_t cnt_r = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
//cout << "RIGHT SEG " << i << " " << cnt_r << " " << right->d_columns_int[1][0] << "-" << right->d_columns_int[1][cnt_r-1] << endl;
};
right->mRecCount = cnt_r;
return cnt_r;
}
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->char_size.size(); i++)
if (a->char_size[i] > max_char1)
max_char1 = a->char_size[i];
return max_char1;
};
size_t max_char(CudaSet* a, set<string> field_names)
{
size_t max_char1 = 8, i;
for (set<string>::iterator it=field_names.begin(); it!=field_names.end(); ++it) {
i = a->columnNames[*it];
if (a->type[i] == 2) {
if (a->char_size[a->type_index[i]] > max_char1)
max_char1 = a->char_size[a->type_index[i]];
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8, i;
while (!field_names.empty()) {
i = a->columnNames[field_names.front()];
if (a->type[i] == 2) {
if (a->char_size[a->type_index[i]] > max_char)
max_char = a->char_size[a->type_index[i]];
};
field_names.pop();
};
return max_char;
};
size_t max_tmp(CudaSet* a)
{
size_t max_sz = 0;
for(unsigned int i = 0; i < a->mColumnCount; i++) {
if(a->type[i] == 0) {
if(int_size > max_sz)
max_sz = int_size;
}
else if(a->type[i] == 1) {
if(float_size > max_sz)
max_sz = float_size;
};
};
size_t m_char = max_char(a);
if(m_char > max_sz)
return m_char;
else
return max_sz;
};
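// estimates the per-record footprint of the listed columns and, if the whole set
// would take more than about a third of free GPU memory, splits it into segments
// sized to roughly a fifth of free memory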
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0, idx;
while(!cols.empty()) {
idx = a->columnNames[cols.front()];
if(a->type[idx] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[a->type_index[idx]];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather((void*)permutation, RecCount, (void*)key, (void*)tmp, len);
// stable_sort the permuted keys and update the permutation
if (SortType.compare("DESC") == 0 )
str_sort(tmp, RecCount, permutation, 1, len);
else
str_sort(tmp, RecCount, permutation, 0, len);
}
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
cudaMemcpy( (void*)tmp, (void*) key, RecCount*len, cudaMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
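// applies the filter expression of set s to one segment of its source set f:
// a zone map check can skip or fully accept the segment; otherwise the predicate
// is evaluated on the device and the indices of matching rows are written to
// b->prm_d with prm_index set to 'R'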
void filter_op(char *s, char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
std::clock_t start1 = std::clock();
if(a->mRecCount == 0) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
size_t cnt = 0;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0)
b->prm_d.resize(a->maxRecs);
//cout << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, a, segment);
//cout << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
copyColumns(a, b->fil_value, segment, cnt);
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
if(segment == a->segCount-1)
b->type_index = a->type_index;
cudaFree(res);
}
else {
setPrm(a,b,map_check,segment);
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
//cout << "filter res " << b->mRecCount << endl;
//std::cout<< "filter time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n';
}
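// makes sure the right table's join column is sorted on the device (hashing char
// join keys first); if it is not, sorts it, keeps the permutation and reorders
// every other selected column on the host accordingly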
void sort_right(CudaSet* right, unsigned int colInd2, string f2, queue<string> op_g, queue<string> op_sel,
bool decimal_join, bool& str_join, size_t& rcount) {
size_t cnt_r = 0;
right->hostRecCount = right->mRecCount;
if (right->type[colInd2] == 2) {
str_join = 1;
right->d_columns_int.push_back(thrust::device_vector<int_type>());
for(unsigned int i = 0; i < right->segCount; i++) {
right->add_hashed_strings(f2, i, right->d_columns_int.size()-1);
};
cnt_r = right->d_columns_int[right->d_columns_int.size()-1].size();
};
// need to allocate all right columns
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, str_join, "", rcount, 0, right->segCount);
if(str_join) {
colInd2 = right->mColumnCount+1;
right->type_index[colInd2] = right->d_columns_int.size()-1;
};
//here we need to make sure that right column is ordered. If not then we order it and keep the permutation
bool sorted;
if(str_join || !decimal_join) {
sorted = thrust::is_sorted(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r);
}
else
sorted = thrust::is_sorted(right->d_columns_float[right->type_index[colInd2]].begin(), right->d_columns_float[right->type_index[colInd2]].begin() + cnt_r);
if(!sorted) {
thrust::device_ptr<unsigned int> v = thrust::device_malloc<unsigned int>(cnt_r);
thrust::sequence(v, v + cnt_r, 0, 1);
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, cnt_r*max_char(right)));
if(str_join) {
thrust::sort_by_key(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, v);
}
else {
thrust::sort_by_key(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, v);
};
if(!right->not_compressed) {
right->mRecCount = 0;
right->resize(cnt_r);
};
thrust::copy(right->d_columns_int[right->type_index[colInd2]].begin(), right->d_columns_int[right->type_index[colInd2]].begin() + cnt_r, right->h_columns_int[right->type_index[colInd2]].begin());
right->deAllocColumnOnDevice(colInd2);
unsigned int i;
while(!op_sel.empty()) {
if (right->columnNames.find(op_sel.front()) != right->columnNames.end()) {
i = right->columnNames[op_sel.front()];
if(i != colInd2) {
queue<string> op_alt1;
op_alt1.push(op_sel.front());
cnt_r = load_queue(op_alt1, right, str_join, "", rcount, 0, right->segCount, 0, 0);
if(right->type[i] == 0) {
thrust::device_ptr<int_type> d_tmp((int_type*)d);
thrust::gather(v, v+cnt_r, right->d_columns_int[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->h_columns_int[right->type_index[i]].begin());
}
else if(right->type[i] == 1) {
thrust::device_ptr<float_type> d_tmp((float_type*)d);
thrust::gather(v, v+cnt_r, right->d_columns_float[right->type_index[i]].begin(), d_tmp);
thrust::copy(d_tmp, d_tmp + cnt_r, right->h_columns_float[right->type_index[i]].begin());
}
else {
thrust::device_ptr<char> d_tmp((char*)d);
str_gather(thrust::raw_pointer_cast(v), cnt_r, (void*)right->d_columns_char[right->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), right->char_size[right->type_index[i]]);
cudaMemcpy( (void*)right->h_columns_char[right->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), cnt_r*right->char_size[right->type_index[i]], cudaMemcpyDeviceToHost);
};
right->deAllocColumnOnDevice(i);
};
};
op_sel.pop();
};
thrust::device_free(v);
cudaFree(d);
right->not_compressed = 1;
}
}
size_t load_right(CudaSet* right, unsigned int colInd2, string f2, queue<string> op_g, queue<string> op_sel,
queue<string> op_alt, bool decimal_join, bool& str_join,
size_t& rcount, unsigned int start_seg, unsigned int end_seg, bool rsz) {
size_t cnt_r = 0;
//right->hostRecCount = right->mRecCount;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
if (right->type[colInd2] == 2) {
str_join = 1;
right->d_columns_int.push_back(thrust::device_vector<int_type>());
for(unsigned int i = start_seg; i < end_seg; i++) {
right->add_hashed_strings(f2, i, right->d_columns_int.size()-1);
};
cnt_r = right->d_columns_int[right->d_columns_int.size()-1].size();
};
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, str_join, "", rcount, start_seg, end_seg, rsz, 1);
}
else {
cnt_r = load_queue(op_alt, right, str_join, f2, rcount, start_seg, end_seg, rsz, 1);
};
if(str_join) {
colInd2 = right->mColumnCount+1;
right->type_index[colInd2] = right->d_columns_int.size()-1;
};
if(right->not_compressed) {
queue<string> op_alt1;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (right->columnNames.find(op_alt.front()) != right->columnNames.end()) {
op_alt1.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt1.empty())
cnt_r = load_queue(op_alt1, right, str_join, "", rcount, start_seg, end_seg, 0, 0);
};
return cnt_r;
};
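// rough heuristic for how many right-table segments fit on the GPU at once, based
// on column widths, segment sizes and currently free memory (minus a ~300 MB reserve)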
unsigned int calc_right_partition(CudaSet* left, CudaSet* right, queue<string> op_sel) {
unsigned int tot_size = left->maxRecs*8;
while(!op_sel.empty()) {
if (right->columnNames.find(op_sel.front()) != right->columnNames.end()) {
if(right->type[right->columnNames[op_sel.front()]] <= 1) {
tot_size = tot_size + right->maxRecs*8*right->segCount;
}
else {
tot_size = tot_size + right->maxRecs*
right->char_size[right->type_index[right->columnNames[op_sel.front()]]]*
right->segCount;
};
};
op_sel.pop();
};
return right->segCount / ((tot_size/(getFreeMem() - 300000000)) + 1);
};
string int_to_string(int number){
string number_string = "";
char ones_char;
int ones = 0;
while(true){
ones = number % 10;
switch(ones){
case 0: ones_char = '0'; break;
case 1: ones_char = '1'; break;
case 2: ones_char = '2'; break;
case 3: ones_char = '3'; break;
case 4: ones_char = '4'; break;
case 5: ones_char = '5'; break;
case 6: ones_char = '6'; break;
case 7: ones_char = '7'; break;
case 8: ones_char = '8'; break;
case 9: ones_char = '9'; break;
default : cout << ("Trouble converting number to string.");
}
number -= ones;
number_string = ones_char + number_string;
if(number == 0){
break;
}
number = number/10;
}
return number_string;
}
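// inserts the records of set s into set f: disk-to-disk copies segment files and
// rewrites the headers, in-memory sets are appended on the host, and an in-memory
// source is compressed into new on-disk segments of a disk-based destination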
void insert_records(char* f, char* s) {
char buf[4096];
size_t size, maxRecs;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
cout << "couldn't find " << s << endl;
exit(0);
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
cout << "couldn't find " << f << endl;
exit(0);
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str_s = a->load_file_name + "." + int_to_string(a->cols[z]) + "." + int_to_string(i);
str_d = b->load_file_name + "." + int_to_string(b->cols[z]) + "." + int_to_string(b->segCount + i);
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int z = 0; z< a->mColumnCount; z++) {
b->reWriteHeader(b->load_file_name, b->cols[z], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[z] == 0) {
thrust::copy(a->h_columns_int[a->type_index[z]].begin(), a->h_columns_int[a->type_index[z]].begin() + a->mRecCount, b->h_columns_int[b->type_index[z]].begin() + oldCount);
}
else if(b->type[z] == 1) {
thrust::copy(a->h_columns_float[a->type_index[z]].begin(), a->h_columns_float[a->type_index[z]].begin() + a->mRecCount, b->h_columns_float[b->type_index[z]].begin() + oldCount);
}
else {
cudaMemcpy(b->h_columns_char[b->type_index[z]] + b->char_size[b->type_index[z]]*oldCount, a->h_columns_char[a->type_index[z]], a->char_size[a->type_index[z]]*a->mRecCount, cudaMemcpyHostToHost);
};
};
}
else if(!a->source && b->source) {
total_segments = b->segCount;
total_count = a->mRecCount;
total_max = process_count;
unsigned int segCount = (a->mRecCount/process_count + 1);
size_t offset = 0, mCount;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(a->mRecCount < process_count) {
mCount = a->mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = a->mRecCount - (segCount-1)*process_count;
};
a->compress(b->load_file_name, offset, 0, z - (segCount-1), mCount);
offset = offset + mCount;
};
//update headers
total_count = a->mRecCount + b->mRecCount;
//cout << "and now lets write " << a->mRecCount << " " << b->mRecCount << endl;
for(unsigned int i = 0; i< b->mColumnCount; i++) {
b->writeHeader(b->load_file_name, b->cols[i], total_segments);
};
};
};
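// removes the rows of a disk-based set that match the parsed delete predicate:
// each segment is filtered on the device, surviving rows are re-compressed and
// written back (segments are renamed or removed as needed) and the headers updated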
void delete_records(char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
cout << "Delete operator is only applicable to disk based sets" << endl;
cout << "for deleting records from derived sets please use filter operator " << endl;
exit(0);
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for (map<string,unsigned int>::iterator it=a->columnNames.begin() ; it != a->columnNames.end(); ++it ) {
op_vx.push((*it).first);
};
allocColumns(a, op_vx);
a->prm_d.resize(a->maxRecs);
a->resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), not_identity<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
cudaFree(res);
//cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
//cout << "rename " << i << " to " << new_seg_count << endl;
if(new_seg_count != i) {
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str_old = a->load_file_name + "." + int_to_string(a->cols[z]);
str_old += "." + int_to_string(i);
str = a->load_file_name + "." + int_to_string(a->cols[z]);
str += "." + int_to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str = a->load_file_name + "." + int_to_string(a->cols[z]);
str += "." + int_to_string(new_seg_count);
if(a->type[z] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[a->type_index[z]].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[a->type_index[z]], 0);
}
else if(a->type[z] == 1){
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[z]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[a->type_index[z]].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[a->type_index[z]], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[a->type_index[z]].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[a->type_index[z]].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[a->type_index[z]].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
void* t;
CUDA_SAFE_CALL(cudaMalloc((void **) &t, tmp*a->char_size[a->type_index[z]]));
apply_permutation_char(a->d_columns_char[a->type_index[z]], (unsigned int*)thrust::raw_pointer_cast(a->prm_d.data()), tmp, (char*)t, a->char_size[a->type_index[z]]);
cudaMemcpy(a->h_columns_char[a->type_index[z]], t, a->char_size[a->type_index[z]]*a->mRecCount, cudaMemcpyDeviceToHost);
cudaFree(t);
a->compress_char(str, z, a->mRecCount, 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
//cout << "rename " << i << " to " << new_seg_count << endl;
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str_old = a->load_file_name + "." + int_to_string(a->cols[z]);
str_old += "." + int_to_string(i);
str = a->load_file_name + "." + int_to_string(a->cols[z]);
str += "." + int_to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
//cout << "TOTAL REM " << totalRemoved << endl;
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z< a->mColumnCount; z++) {
str = a->load_file_name + "." + int_to_string(a->cols[z]);
str += "." + int_to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int z = 0; z< a->mColumnCount; z++) {
a->reWriteHeader(a->load_file_name, a->cols[z], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
cudaFree(d);
};
};
|
7022ab5475255b4f194e3507f3ce078a33676107.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <common/fast_int_div.cuh>
#include <rmm/device_uvector.hpp>
#include "test_utils.h"
namespace MLCommon {
TEST(FastIntDiv, CpuTest)
{
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
for (int i = 0; i < 10000; ++i) {
auto num = rand();
auto correct = num / divisor;
auto computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = -num;
correct = num / divisor;
computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
}
}
}
__global__ void fastIntDivTestKernel(
int* computed, int* correct, const int* in, FastIntDiv fid, int divisor, int len)
{
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < len) {
computed[tid] = in[tid] % fid;
correct[tid] = in[tid] % divisor;
computed[len + tid] = -in[tid] % fid;
correct[len + tid] = -in[tid] % divisor;
}
}
TEST(FastIntDiv, GpuTest)
{
hipStream_t stream = 0;
CUDA_CHECK(hipStreamCreate(&stream));
static const int len = 100000;
static const int TPB = 128;
rmm::device_uvector<int> computed(len * 2, stream);
rmm::device_uvector<int> correct(len * 2, stream);
rmm::device_uvector<int> in(len, stream);
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
std::vector<int> h_in(len);
for (int i = 0; i < len; ++i) {
h_in[i] = rand();
}
raft::update_device(in.data(), h_in.data(), len, stream);
int nblks = raft::ceildiv(len, TPB);
hipLaunchKernelGGL(( fastIntDivTestKernel), dim3(nblks), dim3(TPB), 0, 0,
computed.data(), correct.data(), in.data(), fid, divisor, len);
CUDA_CHECK(hipStreamSynchronize(0));
ASSERT_TRUE(devArrMatch(correct.data(), computed.data(), len * 2, raft::Compare<int>()))
<< " divisor=" << divisor;
}
}
FastIntDiv dummyFunc(int num)
{
FastIntDiv fd(num);
return fd;
}
TEST(FastIntDiv, IncorrectUsage)
{
ASSERT_THROW(dummyFunc(-1), raft::exception);
ASSERT_THROW(dummyFunc(0), raft::exception);
}
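// --- Editor's note (illustrative sketch, not part of the original source) ---
// The tests above document the FastIntDiv contract: construct it once from a
// strictly positive divisor, then reuse it wherever that divisor stays fixed,
// e.g. (hypothetical values):
// FastIntDiv fid(7);
// int q = 20 / fid; // 2, same as 20 / 7
// int r = 20 % fid; // 6, same as 20 % 7
// Non-positive divisors are rejected at construction (see IncorrectUsage above).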
} // namespace MLCommon
| 7022ab5475255b4f194e3507f3ce078a33676107.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <common/fast_int_div.cuh>
#include <rmm/device_uvector.hpp>
#include "test_utils.h"
namespace MLCommon {
TEST(FastIntDiv, CpuTest)
{
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
for (int i = 0; i < 10000; ++i) {
auto num = rand();
auto correct = num / divisor;
auto computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = -num;
correct = num / divisor;
computed = num / fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
num = rand();
correct = num % divisor;
computed = num % fid;
ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num;
}
}
}
__global__ void fastIntDivTestKernel(
int* computed, int* correct, const int* in, FastIntDiv fid, int divisor, int len)
{
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < len) {
computed[tid] = in[tid] % fid;
correct[tid] = in[tid] % divisor;
computed[len + tid] = -in[tid] % fid;
correct[len + tid] = -in[tid] % divisor;
}
}
TEST(FastIntDiv, GpuTest)
{
cudaStream_t stream = 0;
CUDA_CHECK(cudaStreamCreate(&stream));
static const int len = 100000;
static const int TPB = 128;
rmm::device_uvector<int> computed(len * 2, stream);
rmm::device_uvector<int> correct(len * 2, stream);
rmm::device_uvector<int> in(len, stream);
for (int i = 0; i < 100; ++i) {
// get a positive divisor
int divisor;
do {
divisor = rand();
} while (divisor <= 0);
FastIntDiv fid(divisor);
// run it against a few random numbers and compare the outputs
std::vector<int> h_in(len);
for (int i = 0; i < len; ++i) {
h_in[i] = rand();
}
raft::update_device(in.data(), h_in.data(), len, stream);
int nblks = raft::ceildiv(len, TPB);
fastIntDivTestKernel<<<nblks, TPB, 0, 0>>>(
computed.data(), correct.data(), in.data(), fid, divisor, len);
CUDA_CHECK(cudaStreamSynchronize(0));
ASSERT_TRUE(devArrMatch(correct.data(), computed.data(), len * 2, raft::Compare<int>()))
<< " divisor=" << divisor;
}
}
FastIntDiv dummyFunc(int num)
{
FastIntDiv fd(num);
return fd;
}
TEST(FastIntDiv, IncorrectUsage)
{
ASSERT_THROW(dummyFunc(-1), raft::exception);
ASSERT_THROW(dummyFunc(0), raft::exception);
}
} // namespace MLCommon
|
5edbff18fb8543f4d55e56f20cb4c093f3722a93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/core/kernel/random_generator.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/core/common/data_type.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void MaskAndScaleGpu(const int64_t n, float scale, const T* x, const int8_t* mask,
T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale; }
}
template<>
__global__ void MaskAndScaleGpu<half>(const int64_t n, float scale, const half* x,
const int8_t* mask, half* y) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
half h_scale = __float2half(scale);
CUDA_1D_KERNEL_LOOP(i, n) {
half one_or_zero = mask[i];
y[i] = __hmul(__hmul(x[i], one_or_zero), h_scale);
}
#else
printf("use half need nvcc arch >= 530");
assert(false);
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/
}
__global__ void GenMaskGpu(const int64_t n, float threshold, const float* random_tmp,
int8_t* mask) {
CUDA_1D_KERNEL_LOOP(i, n) { mask[i] = random_tmp[i] > threshold; }
}
template<typename T>
void MaskAndScale(DeviceCtx* ctx, const int64_t n, float scale, const T* x, const int8_t* mask,
T* y) {
hipLaunchKernelGGL(( MaskAndScaleGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, x, mask, y);
}
template<>
void MaskAndScale<float16>(DeviceCtx* ctx, const int64_t n, float scale, const float16* x,
const int8_t* mask, float16* y) {
hipLaunchKernelGGL(( MaskAndScaleGpu<half>)
, dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
n, scale, reinterpret_cast<const half*>(x), mask, reinterpret_cast<half*>(y));
}
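// --- Editor's note (illustrative, not part of the original source) ---
// Sketch of how these pieces compose for "inverted" dropout with drop
// probability p (an assumption here; the actual scale attribute is supplied by
// the op): GenMaskGpu keeps element i when random_tmp[i] > threshold (read as
// p), and MaskAndScale computes y[i] = x[i] * mask[i] * scale with
// scale = 1 / (1 - p), so the expectation is preserved:
// E[y[i]] = (1 - p) * x[i] / (1 - p) = x[i].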
template<typename T>
class DropoutKernelGPU final : public user_op::OpKernel {
public:
DropoutKernelGPU() = default;
~DropoutKernelGPU() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const float scale = ctx->Attr<float>("scale");
MaskAndScale<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(),
mask->dptr<int8_t>(), out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_DROPOUT_KERNEL_GPU(dtype) \
REGISTER_USER_KERNEL("dropout") \
.SetCreateFn<DropoutKernelGPU<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true)); \
return Maybe<void>::Ok(); \
});
REGISTER_DROPOUT_KERNEL_GPU(float16)
REGISTER_DROPOUT_KERNEL_GPU(float)
REGISTER_DROPOUT_KERNEL_GPU(double)
template<typename T>
class DropoutGradKernelGPU final : public user_op::OpKernel {
public:
DropoutGradKernelGPU() = default;
~DropoutGradKernelGPU() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const float scale = ctx->Attr<float>("scale");
MaskAndScale<T>(ctx->device_ctx(), dy->shape().elem_cnt(), scale, dy->dptr<T>(),
mask->dptr<int8_t>(), dx->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_DROPOUT_GRAD_KERNEL_GPU(dtype) \
REGISTER_USER_KERNEL("dropout_grad") \
.SetCreateFn<DropoutGradKernelGPU<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true)); \
return Maybe<void>::Ok(); \
});
REGISTER_DROPOUT_GRAD_KERNEL_GPU(float16)
REGISTER_DROPOUT_GRAD_KERNEL_GPU(float)
REGISTER_DROPOUT_GRAD_KERNEL_GPU(double)
} // namespace
} // namespace oneflow
| 5edbff18fb8543f4d55e56f20cb4c093f3722a93.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/core/kernel/random_generator.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/core/common/data_type.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void MaskAndScaleGpu(const int64_t n, float scale, const T* x, const int8_t* mask,
T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = x[i] * static_cast<T>(mask[i]) * scale; }
}
template<>
__global__ void MaskAndScaleGpu<half>(const int64_t n, float scale, const half* x,
const int8_t* mask, half* y) {
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
half h_scale = __float2half(scale);
CUDA_1D_KERNEL_LOOP(i, n) {
half one_or_zero = mask[i];
y[i] = __hmul(__hmul(x[i], one_or_zero), h_scale);
}
#else
printf("use half need nvcc arch >= 530");
assert(false);
#endif /* __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)*/
}
__global__ void GenMaskGpu(const int64_t n, float threshold, const float* random_tmp,
int8_t* mask) {
CUDA_1D_KERNEL_LOOP(i, n) { mask[i] = random_tmp[i] > threshold; }
}
template<typename T>
void MaskAndScale(DeviceCtx* ctx, const int64_t n, float scale, const T* x, const int8_t* mask,
T* y) {
MaskAndScaleGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, x, mask, y);
}
template<>
void MaskAndScale<float16>(DeviceCtx* ctx, const int64_t n, float scale, const float16* x,
const int8_t* mask, float16* y) {
MaskAndScaleGpu<half>
<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
n, scale, reinterpret_cast<const half*>(x), mask, reinterpret_cast<half*>(y));
}
template<typename T>
class DropoutKernelGPU final : public user_op::OpKernel {
public:
DropoutKernelGPU() = default;
~DropoutKernelGPU() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const float scale = ctx->Attr<float>("scale");
MaskAndScale<T>(ctx->device_ctx(), in->shape().elem_cnt(), scale, in->dptr<T>(),
mask->dptr<int8_t>(), out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_DROPOUT_KERNEL_GPU(dtype) \
REGISTER_USER_KERNEL("dropout") \
.SetCreateFn<DropoutKernelGPU<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true)); \
return Maybe<void>::Ok(); \
});
REGISTER_DROPOUT_KERNEL_GPU(float16)
REGISTER_DROPOUT_KERNEL_GPU(float)
REGISTER_DROPOUT_KERNEL_GPU(double)
template<typename T>
class DropoutGradKernelGPU final : public user_op::OpKernel {
public:
DropoutGradKernelGPU() = default;
~DropoutGradKernelGPU() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* mask = ctx->Tensor4ArgNameAndIndex("mask", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const float scale = ctx->Attr<float>("scale");
MaskAndScale<T>(ctx->device_ctx(), dy->shape().elem_cnt(), scale, dy->dptr<T>(),
mask->dptr<int8_t>(), dx->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_DROPOUT_GRAD_KERNEL_GPU(dtype) \
REGISTER_USER_KERNEL("dropout_grad") \
.SetCreateFn<DropoutGradKernelGPU<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true)); \
return Maybe<void>::Ok(); \
});
REGISTER_DROPOUT_GRAD_KERNEL_GPU(float16)
REGISTER_DROPOUT_GRAD_KERNEL_GPU(float)
REGISTER_DROPOUT_GRAD_KERNEL_GPU(double)
} // namespace
} // namespace oneflow
|
d1f7f5498421ede095ff5393851dd5a7278d926c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_complex.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "cudaerror.h"
#include <iostream>
// This is the kernel that will get launched on the device
__global__ void multiply_const_kernel(const float* in, float* out, float k, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
out[i] = in[i] * k;
}
}
// Kernel wrapper so that the GNU Radio code doesn't have to compile with nvcc
void exec_multiply_const_kernel(const float* in,
float* out,
float k,
int grid_size,
int block_size,
size_t n,
hipStream_t stream)
{
hipLaunchKernelGGL(( multiply_const_kernel), dim3(grid_size), dim3(block_size), 0, stream, in, out, k, n);
check_cuda_errors(hipGetLastError());
}
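// --- Editor's note (illustrative, not part of the original source) ---
// A typical call site, assuming d_in/d_out are device float buffers of length n
// (hypothetical names):
// int min_grid = 0, block = 0;
// get_block_and_grid(&min_grid, &block); // declared below
// int grid = (n + block - 1) / block; // enough blocks to cover all n samples
// exec_multiply_const_kernel(d_in, d_out, 2.0f, grid, block, n, stream);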
void get_block_and_grid(int* minGrid, int* minBlock)
{
check_cuda_errors(hipOccupancyMaxPotentialBlockSize(
minGrid, minBlock, multiply_const_kernel, 0, 0));
} | d1f7f5498421ede095ff5393851dd5a7278d926c.cu | #include <stdio.h>
#include <cuComplex.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "cudaerror.h"
#include <iostream>
// This is the kernel that will get launched on the device
__global__ void multiply_const_kernel(const float* in, float* out, float k, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
out[i] = in[i] * k;
}
}
// Kernel wrapper so that the GNU Radio code doesn't have to compile with nvcc
void exec_multiply_const_kernel(const float* in,
float* out,
float k,
int grid_size,
int block_size,
size_t n,
cudaStream_t stream)
{
multiply_const_kernel<<<grid_size, block_size, 0, stream>>>(in, out, k, n);
check_cuda_errors(cudaGetLastError());
}
void get_block_and_grid(int* minGrid, int* minBlock)
{
check_cuda_errors(cudaOccupancyMaxPotentialBlockSize(
minGrid, minBlock, multiply_const_kernel, 0, 0));
} |
b2fbbd78b5f993d6b402f76a4763582201711e0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_incremental_map_track.h"
#include <malloc.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "gpu_defines.h"
namespace SySal
{
namespace GPU
{
void PrismMapTracker::make_threads_blocks(int iterations, dim3 &threads, dim3 &blocks)
{
threads.x = __max(1, __min(iterations, m_Prop.maxThreadsPerBlock));
threads.y = threads.z = 1;
int totalblocks = int(ceil((double)iterations / (double)threads.x));
int maxblksx = m_Prop.maxGridSize[0] / m_Prop.maxThreadsPerBlock;
if (totalblocks >= maxblksx)
{
blocks.x = maxblksx;
blocks.y = int(ceil((double)totalblocks / (double)blocks.x));
}
else
{
blocks.x = totalblocks;
blocks.y = 1;
}
blocks.z = 1;
/*
if (threads.x * blocks.x > m_Prop.maxGridSize[0])
{
printf("\nWARNING: Grid size exceeded! %d blocks of %d threads requested." , blocks.x, threads.x);
}
*/
if (blocks.y > m_Prop.maxGridSize[0])
{
printf("\nWARNING: Grid size exceeded! %d y blocks requested." , blocks.y);
}
}
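// --- Editor's note (illustrative, not part of the original source) ---
// Worked example of the computation above: with iterations = 1000000 and
// maxThreadsPerBlock = 1024, threads.x = 1024 and totalblocks =
// ceil(1000000 / 1024) = 977. If 977 < maxblksx the launch is simply
// blocks = (977, 1, 1); otherwise the blocks are folded into a 2-D grid with
// blocks.x = maxblksx and blocks.y = ceil(977 / maxblksx). Either way the grid
// covers at least 1000000 threads, so kernels are expected to bounds-check.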
void PrismMapTracker::HardReset()
{
m_Chainer.HardReset();
m_Tracker.HardReset();
}
PrismMapTracker::PrismMapTracker(int gpuid) : CTOR_INIT(pHThisView), CTOR_INIT(pHLastView)
{
m_Chainer.pThis = this;
m_Tracker.pThis = this;
m_DeviceId = gpuid;
m_ChainDumper = 0;
m_PerformanceCounters.GPU = m_DeviceId;
m_PerformanceCounters.GPUClockMHz = 0;
m_PerformanceCounters.GPUCores = 0;
m_PerformanceCounters.MapTimeMS = 0;
m_PerformanceCounters.TrackTimeMS = 0;
m_PerformanceCounters.Clusters = 0;
m_PerformanceCounters.Chains = 0;
m_PerformanceCounters.Tracks = 0;
if (hipGetDeviceProperties(&m_Prop, m_DeviceId)) throw "Invalid CUDA device.";
m_PerformanceCounters.GPUClockMHz = m_Prop.clockRate / 1000;
int cores = 0;
if (m_Prop.major == 1)
{
cores = 8;
}
else if (m_Prop.major == 2)
{
if (m_Prop.minor == 0)
cores = 32;
else if (m_Prop.minor == 1)
cores = 48;
}
else if (m_Prop.major == 3)
{
cores = 192;
}
else if (m_Prop.major == 5)
{
cores = 128;
}
m_PerformanceCounters.GPUCores = m_Prop.multiProcessorCount * cores;
printf("\n\nCUDA properties for device %d\nCompute capability %d.%d\nMaxThreadsPerMultiProcessor %d\nMaxThreadsPerBlock %d\nMultiprocessors %d\nMaxGridSize: %d %d %d\nMemory %d MB\n\n",
gpuid, m_Prop.major, m_Prop.minor, m_Prop.maxThreadsPerMultiProcessor, m_Prop.maxThreadsPerBlock, m_Prop.multiProcessorCount, m_Prop.maxGridSize[0], m_Prop.maxGridSize[1], m_Prop.maxGridSize[2], m_Prop.totalGlobalMem / 1048576);
}
PrismMapTracker::~PrismMapTracker()
{
HOST_DEALLOC(pHLastView);
HOST_DEALLOC(pHThisView);
}
void PrismMapTracker::SetChainDumper(void *pContext, ChainDumper dmp)
{
m_ChainDumper = dmp;
m_CDContext = pContext;
}
void PrismMapTracker::SendViewsToTracker(int minviewtag, int width, int height, ChainView *pLastView, ChainView *pThisView)
{
if (m_ChainDumper)
{
int sz;
HOST_WISE_ALLOC(pHLastView, sizeof(ChainView));
hipMemcpy(pHLastView, m_Chainer.pLastView, sizeof(ChainView), hipMemcpyDeviceToHost);
sz = pHLastView->Size();
HOST_WISE_ALLOC(pHLastView, sz);
hipMemcpy(pHLastView, m_Chainer.pLastView, sz, hipMemcpyDeviceToHost);
HOST_WISE_ALLOC(pHThisView, sizeof(ChainView));
hipMemcpy(pHThisView, m_Chainer.pThisView, sizeof(ChainView), hipMemcpyDeviceToHost);
sz = pHLastView->Size();
HOST_WISE_ALLOC(pHThisView, sz);
hipMemcpy(pHThisView, m_Chainer.pThisView, sz, hipMemcpyDeviceToHost);
m_ChainDumper(m_CDContext, pHLastView, pHThisView);
}
m_Tracker.InternalFindTracks(minviewtag, width, height, pLastView, pThisView);
}
int PrismMapTracker::ClusterChainer::GetXYScale() { return 1 << XY_SCALE_SHIFT; }
int PrismMapTracker::ClusterChainer::GetZScale() { return 1 << Z_SCALE_SHIFT; }
void PrismMapTracker::ClusterChainer::SetLogFileName(char *logfile) {}
ChainMapHeader *PrismMapTracker::ClusterChainer::Dump()
{
return 0;
}
SySal::ClusterChainer::Configuration PrismMapTracker::ClusterChainer::GetConfiguration()
{
return C;
}
SySal::OpaqueChainMap &PrismMapTracker::ClusterChainer::GetDeviceChainMap()
{
throw "Not supported.";
}
bool PrismMapTracker::ClusterChainer::SetReferenceZs(SySal::IntClusterFile &cf, bool istop)
{
EmulsionEdge t, b;
int refimg = 0;
FindEdges(t, b, cf, C.ClusterThreshold, refimg);
if (t.Valid && b.Valid)
{
int place = 0;
int i;
double thk = t.Z - b.Z;
for (i = 0; i < ThicknessSamples; i++)
if (pThicknessSamples[i] >= thk)
{
place = i;
break;
}
pThicknessSamples = (double *)realloc(pThicknessSamples, (ThicknessSamples + 1) * sizeof(double));
for (i = ThicknessSamples - 1; i >= place; i--)
pThicknessSamples[i + 1] = pThicknessSamples[i];
pThicknessSamples[place] = thk;
ThicknessSamples++;
}
return (ThicknessSamples >= 1);
}
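// Editor's note (descriptive, not part of the original source): SetReferenceZs
// above inserts each measured emulsion thickness into pThicknessSamples in
// ascending order, so GetThickness below can return the median by reading the
// middle element (or averaging the two middle elements for an even count).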
double PrismMapTracker::GetThickness()
{
if (m_Chainer.ThicknessSamples <= 0) throw "No thickness info available.";
if (m_Chainer.ThicknessSamples % 2 == 1) return m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2];
return 0.5 * (m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2] + m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2 - 1]);
}
int PrismMapTracker::ClusterChainer::TotalChains()
{
return 0;
}
int PrismMapTracker::Tracker::GetXYScale() { return 1 << XY_SCALE_SHIFT; }
int PrismMapTracker::Tracker::GetZScale() { return 1 << Z_SCALE_SHIFT; }
int PrismMapTracker::Tracker::GetSlopeScale() { return 1 << SLOPE_SCALE_SHIFT; }
SySal::Tracker::Configuration PrismMapTracker::Tracker::GetConfiguration()
{
return C;
}
void PrismMapTracker::Tracker::SetOption(const char *option, const char *value)
{
if (strcmpi(option, "_MergeTracksKernel_LoopLimiter_") == 0)
{
if (sscanf(value, "%d", &_MergeTracksKernel_LoopLimiter_) != 1)
throw "Bad option value.";
}
}
void PrismMapTracker::Tracker::SetLogFileName(char *logfile) {}
SySal::TrackMapHeader *PrismMapTracker::Tracker::Dump() { return pHostTracks; }
int PrismMapTracker::Tracker::TotalTracks() { return pHostTracks->Count; }
int PrismMapTracker::Tracker::FindTracks(SySal::ChainMapHeader &cm)
{
throw "Not supported.";
}
int PrismMapTracker::Tracker::FindTracksInDevice(SySal::OpaqueChainMap &cm)
{
throw "Superseded by PrismMapTracker::Tracker::Dump.";
}
PrismMapTracker::PerformanceCounters PrismMapTracker::GetPerformanceCounters() { return m_PerformanceCounters; }
};
}; | b2fbbd78b5f993d6b402f76a4763582201711e0c.cu | #include "gpu_incremental_map_track.h"
#include <malloc.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "gpu_defines.h"
namespace SySal
{
namespace GPU
{
void PrismMapTracker::make_threads_blocks(int iterations, dim3 &threads, dim3 &blocks)
{
threads.x = __max(1, __min(iterations, m_Prop.maxThreadsPerBlock));
threads.y = threads.z = 1;
int totalblocks = int(ceil((double)iterations / (double)threads.x));
int maxblksx = m_Prop.maxGridSize[0] / m_Prop.maxThreadsPerBlock;
if (totalblocks >= maxblksx)
{
blocks.x = maxblksx;
blocks.y = int(ceil((double)totalblocks / (double)blocks.x));
}
else
{
blocks.x = totalblocks;
blocks.y = 1;
}
blocks.z = 1;
/*
if (threads.x * blocks.x > m_Prop.maxGridSize[0])
{
printf("\nWARNING: Grid size exceeded! %d blocks of %d threads requested." , blocks.x, threads.x);
}
*/
if (blocks.y > m_Prop.maxGridSize[0])
{
printf("\nWARNING: Grid size exceeded! %d y blocks requested." , blocks.y);
}
}
void PrismMapTracker::HardReset()
{
m_Chainer.HardReset();
m_Tracker.HardReset();
}
PrismMapTracker::PrismMapTracker(int gpuid) : CTOR_INIT(pHThisView), CTOR_INIT(pHLastView)
{
m_Chainer.pThis = this;
m_Tracker.pThis = this;
m_DeviceId = gpuid;
m_ChainDumper = 0;
m_PerformanceCounters.GPU = m_DeviceId;
m_PerformanceCounters.GPUClockMHz = 0;
m_PerformanceCounters.GPUCores = 0;
m_PerformanceCounters.MapTimeMS = 0;
m_PerformanceCounters.TrackTimeMS = 0;
m_PerformanceCounters.Clusters = 0;
m_PerformanceCounters.Chains = 0;
m_PerformanceCounters.Tracks = 0;
if (cudaGetDeviceProperties(&m_Prop, m_DeviceId)) throw "Invalid CUDA device.";
m_PerformanceCounters.GPUClockMHz = m_Prop.clockRate / 1000;
int cores = 0;
if (m_Prop.major == 1)
{
cores = 8;
}
else if (m_Prop.major == 2)
{
if (m_Prop.minor == 0)
cores = 32;
else if (m_Prop.minor == 1)
cores = 48;
}
else if (m_Prop.major == 3)
{
cores = 192;
}
else if (m_Prop.major == 5)
{
cores = 128;
}
m_PerformanceCounters.GPUCores = m_Prop.multiProcessorCount * cores;
printf("\n\nCUDA properties for device %d\nCompute capability %d.%d\nMaxThreadsPerMultiProcessor %d\nMaxThreadsPerBlock %d\nMultiprocessors %d\nMaxGridSize: %d %d %d\nMemory %d MB\n\n",
gpuid, m_Prop.major, m_Prop.minor, m_Prop.maxThreadsPerMultiProcessor, m_Prop.maxThreadsPerBlock, m_Prop.multiProcessorCount, m_Prop.maxGridSize[0], m_Prop.maxGridSize[1], m_Prop.maxGridSize[2], m_Prop.totalGlobalMem / 1048576);
}
PrismMapTracker::~PrismMapTracker()
{
HOST_DEALLOC(pHLastView);
HOST_DEALLOC(pHThisView);
}
void PrismMapTracker::SetChainDumper(void *pContext, ChainDumper dmp)
{
m_ChainDumper = dmp;
m_CDContext = pContext;
}
void PrismMapTracker::SendViewsToTracker(int minviewtag, int width, int height, ChainView *pLastView, ChainView *pThisView)
{
if (m_ChainDumper)
{
int sz;
HOST_WISE_ALLOC(pHLastView, sizeof(ChainView));
cudaMemcpy(pHLastView, m_Chainer.pLastView, sizeof(ChainView), cudaMemcpyDeviceToHost);
sz = pHLastView->Size();
HOST_WISE_ALLOC(pHLastView, sz);
cudaMemcpy(pHLastView, m_Chainer.pLastView, sz, cudaMemcpyDeviceToHost);
HOST_WISE_ALLOC(pHThisView, sizeof(ChainView));
cudaMemcpy(pHThisView, m_Chainer.pThisView, sizeof(ChainView), cudaMemcpyDeviceToHost);
sz = pHLastView->Size();
HOST_WISE_ALLOC(pHThisView, sz);
cudaMemcpy(pHThisView, m_Chainer.pThisView, sz, cudaMemcpyDeviceToHost);
m_ChainDumper(m_CDContext, pHLastView, pHThisView);
}
m_Tracker.InternalFindTracks(minviewtag, width, height, pLastView, pThisView);
}
int PrismMapTracker::ClusterChainer::GetXYScale() { return 1 << XY_SCALE_SHIFT; }
int PrismMapTracker::ClusterChainer::GetZScale() { return 1 << Z_SCALE_SHIFT; }
void PrismMapTracker::ClusterChainer::SetLogFileName(char *logfile) {}
ChainMapHeader *PrismMapTracker::ClusterChainer::Dump()
{
return 0;
}
SySal::ClusterChainer::Configuration PrismMapTracker::ClusterChainer::GetConfiguration()
{
return C;
}
SySal::OpaqueChainMap &PrismMapTracker::ClusterChainer::GetDeviceChainMap()
{
throw "Not supported.";
}
bool PrismMapTracker::ClusterChainer::SetReferenceZs(SySal::IntClusterFile &cf, bool istop)
{
EmulsionEdge t, b;
int refimg = 0;
FindEdges(t, b, cf, C.ClusterThreshold, refimg);
if (t.Valid && b.Valid)
{
int place = 0;
int i;
double thk = t.Z - b.Z;
for (i = 0; i < ThicknessSamples; i++)
if (pThicknessSamples[i] >= thk)
{
place = i;
break;
}
pThicknessSamples = (double *)realloc(pThicknessSamples, (ThicknessSamples + 1) * sizeof(double));
for (i = ThicknessSamples - 1; i >= place; i--)
pThicknessSamples[i + 1] = pThicknessSamples[i];
pThicknessSamples[place] = thk;
ThicknessSamples++;
}
return (ThicknessSamples >= 1);
}
double PrismMapTracker::GetThickness()
{
if (m_Chainer.ThicknessSamples <= 0) throw "No thickness info available.";
if (m_Chainer.ThicknessSamples % 2 == 1) return m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2];
return 0.5 * (m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2] + m_Chainer.pThicknessSamples[m_Chainer.ThicknessSamples / 2 - 1]);
}
int PrismMapTracker::ClusterChainer::TotalChains()
{
return 0;
}
int PrismMapTracker::Tracker::GetXYScale() { return 1 << XY_SCALE_SHIFT; }
int PrismMapTracker::Tracker::GetZScale() { return 1 << Z_SCALE_SHIFT; }
int PrismMapTracker::Tracker::GetSlopeScale() { return 1 << SLOPE_SCALE_SHIFT; }
SySal::Tracker::Configuration PrismMapTracker::Tracker::GetConfiguration()
{
return C;
}
void PrismMapTracker::Tracker::SetOption(const char *option, const char *value)
{
if (strcmpi(option, "_MergeTracksKernel_LoopLimiter_") == 0)
{
if (sscanf(value, "%d", &_MergeTracksKernel_LoopLimiter_) != 1)
throw "Bad option value.";
}
}
void PrismMapTracker::Tracker::SetLogFileName(char *logfile) {}
SySal::TrackMapHeader *PrismMapTracker::Tracker::Dump() { return pHostTracks; }
int PrismMapTracker::Tracker::TotalTracks() { return pHostTracks->Count; }
int PrismMapTracker::Tracker::FindTracks(SySal::ChainMapHeader &cm)
{
throw "Not supported.";
}
int PrismMapTracker::Tracker::FindTracksInDevice(SySal::OpaqueChainMap &cm)
{
throw "Superseded by PrismMapTracker::Tracker::Dump.";
}
PrismMapTracker::PerformanceCounters PrismMapTracker::GetPerformanceCounters() { return m_PerformanceCounters; }
};
}; |
e41bbf9c0b54927737c23aaa5d2054d0b86ed288.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/transpose_op.h"
#include <limits>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
// Cuda memory is precious so let's do a lower ndim limit.
#define COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS 6
namespace {
// TODO(jiayq): one possible optimization is to copy the buffer into a shared
// memory location to speed up access.
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data,
Dtype* to_data, const int* buffer, const int num_axes) {
int from_inds[COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS];
const int* from_counts = buffer;
const int* to_counts = buffer + num_axes;
const int* axes = buffer + num_axes * 2;
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int from_index = index, to_index = 0;
for (int i = num_axes - 1; i >= 0; --i) {
from_inds[i] = from_index % from_counts[i];
from_index = from_index / from_counts[i];
}
for (int i = 0; i < num_axes - 1; i++) {
to_index = (to_index + from_inds[axes[i]]) * to_counts[i + 1];
}
to_index += from_inds[axes[num_axes - 1]];
to_data[to_index] = from_data[index];
}
}
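// --- Editor's note (illustrative, not part of the original source) ---
// Worked example of the index arithmetic above: transposing a 2x3 tensor with
// axes = {1, 0} gives to_counts = {3, 2}. For input linear index 1 the
// decomposition yields from_inds = {0, 1}; then
// to_index = (0 + from_inds[axes[0]]) * to_counts[1] = 1 * 2 = 2, plus
// from_inds[axes[1]] = 0, so input element (0,1) lands at output linear
// index 2, i.e. output position (1,0), as expected for a transpose.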
} // namespace
template <>
template <typename T>
bool TransposeOp<CUDAContext>::DoRunWithType() {
const auto& input = Input(0);
auto* output = Output(0);
int count = input.size();
int ndim = input.ndim();
CAFFE_ENFORCE(
count < std::numeric_limits<int>::max(),
"Transpose op on GPU only supports int32");
CAFFE_ENFORCE(
ndim <= COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS,
"Input ndim exceeds compile time max.");
// Buffer contains the following data:
// (1) the dimensions of the inputs
// (2) the dimensions of the outputs
// (3) the axis mapping from inputs to outputs
buffer_cpu_.Resize(3 * ndim);
int* buffer_data = buffer_cpu_.mutable_data<int>();
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = input.dim32(i);
}
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = output->dim32(i);
}
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = axes_[i];
}
// Copy the dimension information to GPU.
buffer_.CopyFrom(buffer_cpu_, &context_);
hipLaunchKernelGGL(( transpose_gpu<T>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
count, input.template data<T>(), output->template mutable_data<T>(),
buffer_.data<int>(), ndim);
return true;
}
REGISTER_CUDA_OPERATOR(Transpose, TransposeOp<CUDAContext>);
} // namespace caffe2
| e41bbf9c0b54927737c23aaa5d2054d0b86ed288.cu | #include "caffe2/operators/transpose_op.h"
#include <limits>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
// Cuda memory is precious so let's do a lower ndim limit.
#define COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS 6
namespace {
// TODO(jiayq): one possible optimization is to copy the buffer into a shared
// memory location to speed up access.
template <typename Dtype>
__global__ void transpose_gpu(const int nthreads, const Dtype* from_data,
Dtype* to_data, const int* buffer, const int num_axes) {
int from_inds[COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS];
const int* from_counts = buffer;
const int* to_counts = buffer + num_axes;
const int* axes = buffer + num_axes * 2;
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int from_index = index, to_index = 0;
for (int i = num_axes - 1; i >= 0; --i) {
from_inds[i] = from_index % from_counts[i];
from_index = from_index / from_counts[i];
}
for (int i = 0; i < num_axes - 1; i++) {
to_index = (to_index + from_inds[axes[i]]) * to_counts[i + 1];
}
to_index += from_inds[axes[num_axes - 1]];
to_data[to_index] = from_data[index];
}
}
} // namespace
template <>
template <typename T>
bool TransposeOp<CUDAContext>::DoRunWithType() {
const auto& input = Input(0);
auto* output = Output(0);
int count = input.size();
int ndim = input.ndim();
CAFFE_ENFORCE(
count < std::numeric_limits<int>::max(),
"Transpose op on GPU only supports int32");
CAFFE_ENFORCE(
ndim <= COMPILE_TIME_CUDA_MAX_TRANSPOSE_DIMS,
"Input ndim exceeds compile time max.");
// Buffer contains the following data:
// (1) the dimensions of the inputs
// (2) the dimensions of the outputs
// (3) the axis mapping from inputs to outputs
buffer_cpu_.Resize(3 * ndim);
int* buffer_data = buffer_cpu_.mutable_data<int>();
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = input.dim32(i);
}
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = output->dim32(i);
}
for (int i = 0; i < ndim; ++i) {
*(buffer_data++) = axes_[i];
}
// Copy the dimension information to GPU.
buffer_.CopyFrom(buffer_cpu_, &context_);
transpose_gpu<T><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
count, input.template data<T>(), output->template mutable_data<T>(),
buffer_.data<int>(), ndim);
return true;
}
REGISTER_CUDA_OPERATOR(Transpose, TransposeOp<CUDAContext>);
} // namespace caffe2
|
ba86c420cfaad44a95f4d84b4d3d138442d65f18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// @file fdwt97.cu
/// @brief CUDA implementation of forward 9/7 2D DWT.
/// @author Martin Jirman ([email protected])
/// @date 2011-01-20 13:18
///
///
/// Copyright (c) 2011 Martin Jirman
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright
/// notice, this list of conditions and the following disclaimer in the
/// documentation and/or other materials provided with the distribution.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include "cudacommon.h"
#include "common.h"
#include "transform_buffer.h"
#include "io.h"
namespace dwt_cuda {
/// Wraps a buffer and methods for computing 9/7 FDWT with sliding window
/// of specified size. Template arguments specify this size.
/// @tparam WIN_SIZE_X width of sliding window
/// @tparam WIN_SIZE_Y height of sliding window
template <int WIN_SIZE_X, int WIN_SIZE_Y>
class FDWT97 {
private:
/// Type of shared memory buffer used for 9/7 DWT.
typedef TransformBuffer<float, WIN_SIZE_X, WIN_SIZE_Y + 7, 4> FDWT97Buffer;
/// Actual shared buffer used for forward 9/7 DWT.
FDWT97Buffer buffer;
/// Difference of indices of two vertically neighboring items in buffer.
enum { STRIDE = FDWT97Buffer::VERTICAL_STRIDE };
/// One thread's info about loading input image
/// @tparam CHECKED true if loader should check for image boundaries
template <bool CHECKED>
struct FDWT97ColumnLoadingInfo {
/// Loader of pixels from some input image.
VerticalDWTPixelLoader<float, CHECKED> loader;
/// Offset of column loaded by loader. (Offset in shared buffer.)
int offset;
};
/// Horizontal 9/7 FDWT on specified lines of transform buffer.
/// @param lines number of lines to be transformed
/// @param firstLine index of the first line to be transformed
__device__ void horizontalFDWT97(const int lines, const int firstLine) {
__syncthreads();
buffer.forEachHorizontalOdd(firstLine, lines, AddScaledSum(f97Predict1));
__syncthreads();
buffer.forEachHorizontalEven(firstLine, lines, AddScaledSum(f97Update1));
__syncthreads();
buffer.forEachHorizontalOdd(firstLine, lines, AddScaledSum(f97Predict2));
__syncthreads();
buffer.forEachHorizontalEven(firstLine, lines, AddScaledSum(f97Update2));
__syncthreads();
buffer.scaleHorizontal(scale97Div, scale97Mul, firstLine, lines);
__syncthreads();
}
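// --- Editor's note (descriptive, not part of the original source) ---
// The four forEach* passes above are the predict/update steps of the standard
// lifting factorization of the 9/7 wavelet, and scaleHorizontal applies the
// final scaling of the low-pass (scale97Div) and high-pass (scale97Mul)
// coefficients; the same lifting sequence is reused vertically in transform().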
/// Initializes one column of shared transform buffer with 7 input pixels.
/// Those 7 pixels will not be transformed. Also initializes given loader.
/// @tparam CHECKED true if loader should check for image boundaries
/// @param column (uninitialized) object for loading input pixels
/// @param columnIndex index (not offset!) of the column to be loaded
/// (relative to threadblock's first column)
/// @param input pointer to input image in GPU memory
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param firstY index of first row to be loaded from image
template <bool CHECKED>
__device__ void initColumn(FDWT97ColumnLoadingInfo<CHECKED> & column,
const int columnIndex, const float * const input,
const int sizeX, const int sizeY,
const int firstY) {
// get offset of the column with index 'columnIndex'
column.offset = buffer.getColumnOffset(columnIndex);
// x-coordinate of the first pixel to be loaded by given loader
const int firstX = blockIdx.x * WIN_SIZE_X + columnIndex;
if(blockIdx.y == 0) {
// topmost block - apply mirroring rules when loading first 7 rows
column.loader.init(sizeX, sizeY, firstX, firstY);
// load pixels in mirrored way
buffer[column.offset + 4 * STRIDE] = column.loader.loadFrom(input);
buffer[column.offset + 3 * STRIDE] =
buffer[column.offset + 5 * STRIDE] = column.loader.loadFrom(input);
buffer[column.offset + 2 * STRIDE] =
buffer[column.offset + 6 * STRIDE] = column.loader.loadFrom(input);
buffer[column.offset + 1 * STRIDE] = column.loader.loadFrom(input);
buffer[column.offset + 0 * STRIDE] = column.loader.loadFrom(input);
// reinitialize loader to start with pixel #3 again
column.loader.init(sizeX, sizeY, firstX, firstY + 3);
} else {
// non-topmost row - regular loading:
column.loader.init(sizeX, sizeY, firstX, firstY - 4);
// load 7 rows into the transform buffer
for(int i = 0; i < 7; i++) {
buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input);
}
}
// Now, the next pixel, which will be loaded by loader, is pixel #3.
}
/// Loads another WIN_SIZE_Y pixels into given column using given loader.
/// @tparam CHECKED true if loader should check for image boundaries
/// @param input input image to load from
/// @param column loader and offset of loaded column in shared buffer
template <bool CHECKED>
inline __device__ void loadWindowIntoColumn(const float * const input,
FDWT97ColumnLoadingInfo<CHECKED> & column) {
for(int i = 7; i < (7 + WIN_SIZE_Y); i++) {
buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input);
}
}
/// Main GPU 9/7 FDWT entry point.
/// @tparam CHECK_LOADS true if boundaries should be checked when loading
/// @tparam CHECK_WRITES true if boundaries should be checked when writing
/// @param in input image
/// @param out output buffer
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param winSteps number of steps of sliding window
template <bool CHECK_LOADS, bool CHECK_WRITES>
__device__ void transform(const float * const in, float * const out,
const int sizeX, const int sizeY,
const int winSteps) {
// info about columns loaded by this thread: one main column and possibly
// one boundary column. (Only some threads load some boundary column.)
FDWT97ColumnLoadingInfo<CHECK_LOADS> loadedColumn;
FDWT97ColumnLoadingInfo<CHECK_LOADS> boundaryColumn;
// Initialize first 7 lines of transform buffer.
const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps;
initColumn(loadedColumn, threadIdx.x, in, sizeX, sizeY, firstY);
// Some threads initialize boundary columns.
boundaryColumn.offset = 0;
boundaryColumn.loader.clear();
if(threadIdx.x < 7) {
// each thread among first 7 ones gets index of one of boundary columns
const int colId = threadIdx.x + ((threadIdx.x < 3) ? WIN_SIZE_X : -7);
// Thread initializes offset of the boundary column (in shared buffer),
// first 7 pixels of the column and a loader for this column.
initColumn(boundaryColumn, colId, in, sizeX, sizeY, firstY);
}
// horizontally transform first 7 rows in all columns
horizontalFDWT97(7, 0);
// Index of column handled by this thread. (First half of threads handle
// even columns and others handle odd columns.)
const int outColumnIndex = parityIdx<WIN_SIZE_X>();
// writer of output linear bands - initialize it
const int firstX = blockIdx.x * WIN_SIZE_X + outColumnIndex;
VerticalDWTBandWriter<float, CHECK_WRITES> writer;
writer.init(sizeX, sizeY, firstX, firstY);
// transform buffer offset of column transformed and saved by this thread
const int outColumnOffset = buffer.getColumnOffset(outColumnIndex);
// (Each iteration of this loop assumes that first 7 rows of transform
// buffer are already loaded with horizontally transformed coefficients.)
for(int w = 0; w < winSteps; w++) {
// Load another WIN_SIZE_Y lines of thread's column into the buffer.
loadWindowIntoColumn(in, loadedColumn);
// some threads also load boundary columns
if(threadIdx.x < 7) {
loadWindowIntoColumn(in, boundaryColumn);
}
// horizontally transform all newly loaded lines
horizontalFDWT97(WIN_SIZE_Y, 7);
// Using 7 registers, remember current values of last 7 rows of
// transform buffer. These rows are transformed horizontally only
// and will be used in next iteration.
float last7Lines[7];
for(int i = 0; i < 7; i++) {
last7Lines[i] = buffer[outColumnOffset + (WIN_SIZE_Y + i) * STRIDE];
}
// vertically transform all central columns (do not scale yet)
buffer.forEachVerticalOdd(outColumnOffset, AddScaledSum(f97Predict1));
buffer.forEachVerticalEven(outColumnOffset, AddScaledSum(f97Update1));
buffer.forEachVerticalOdd(outColumnOffset, AddScaledSum(f97Predict2));
buffer.forEachVerticalEven(outColumnOffset, AddScaledSum(f97Update2));
// Save all results of current window. Results are in transform buffer
// at rows from #4 to #(4 + WIN_SIZE_Y). Other rows are invalid now.
// (They only served as a boundary for vertical FDWT.)
for(int i = 4; i < (4 + WIN_SIZE_Y); i += 2) {
const int index = outColumnOffset + i * STRIDE;
// Write low coefficients from column into low band ...
writer.writeLowInto(out, buffer[index] * scale97Div);
// ... and high coefficients into the high band.
writer.writeHighInto(out, buffer[index + STRIDE] * scale97Mul);
}
// Use last 7 remembered lines as first 7 lines for next iteration.
// As expected, these lines are already horizontally transformed.
for(int i = 0; i < 7; i++) {
buffer[outColumnOffset + i * STRIDE] = last7Lines[i];
}
// Wait for all writing threads before proceeding to loading new
// pixels in next iteration. (Not to overwrite those which
// are not written yet.)
__syncthreads();
}
}
public:
/// Runs one of specialized variants of 9/7 FDWT according to distance of
/// processed pixels to image boundary. Some variants do not check for
/// boundary and thus are slightly faster.
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
/// @param steps number of steps of sliding window
__device__ static void run(const float * const input, float * const output,
const int sx, const int sy, const int steps) {
// object with transform buffer in shared memory
__shared__ FDWT97<WIN_SIZE_X, WIN_SIZE_Y> fdwt97;
// Compute limits of this threadblock's block of pixels and use them to
// determine, whether this threadblock will have to deal with boundary.
// (3 in next expressions is for radius of impulse response of 9/7 FDWT.)
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 3;
const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 3;
const bool atRightBoudary = maxX >= sx;
const bool atBottomBoudary = maxY >= sy;
// Select specialized version of code according to distance of this
// threadblock's pixels from image boundary.
if(atBottomBoudary) {
// near bottom boundary => check both writing and reading
fdwt97.transform<true, true>(input, output, sx, sy, steps);
} else if(atRightBoudary) {
// near right boundary only => check writing only
fdwt97.transform<false, true>(input, output, sx, sy, steps);
} else {
// no nearby boundary => check nothing
fdwt97.transform<false, false>(input, output, sx, sy, steps);
}
}
}; // end of class FDWT97
/// Main GPU 9/7 FDWT entry point.
/// @param input input image
/// @param output output buffer
/// @param sx width of the input image
/// @param sy height of the input image
/// @param steps number of steps of sliding window
template <int WIN_SX, int WIN_SY>
__launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(FDWT97<WIN_SX, WIN_SY>), 8))
__global__ void fdwt97Kernel(const float * const input, float * const output,
const int sx, const int sy, const int steps) {
// Excuse me, dear reader of this code - this call has to be here. If you
// try to simply put the contents of the following method right here, CUDA compiler
// (version 3.2) will spit tons of nonsense messy errors ...
// Hope they will not break it even more in future releases.
FDWT97<WIN_SX, WIN_SY>::run(input, output, sx, sy, steps);
}
/// Only computes optimal number of sliding window steps,
/// number of threadblocks and then launches the 9/7 FDWT kernel.
/// @tparam WIN_SX width of sliding window
/// @tparam WIN_SY height of sliding window
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
template <int WIN_SX, int WIN_SY>
void launchFDWT97Kernel (float * in, float * out, int sx, int sy, float &kernelTime) {
// compute optimal number of steps of each sliding window
const int steps = divRndUp(sy, 15 * WIN_SY);
// prepare grid size
dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps));
// timing events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsedTime;
// run kernel, possibly measure time and finally check the call
hipEventRecord(start, 0);
hipLaunchKernelGGL(( fdwt97Kernel<WIN_SX, WIN_SY>), dim3(gSize), dim3(WIN_SX), 0, 0, in, out, sx, sy, steps);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
}
/// Forward 9/7 2D DWT. See common rules (dwt.h) for more details.
/// @param in Input DWT coefficients. Should be normalized (in range
/// [-0.5, 0.5]). Will not be preserved (will be overwritten).
/// @param out output buffer on GPU - format specified in common rules
/// @param sizeX width of input image (in pixels)
/// @param sizeY height of input image (in pixels)
/// @param levels number of recursive DWT levels
float fdwt97(float * in, float * out, int sizeX, int sizeY, int levels) {
float kernelTime = 0;
// select right width of kernel for the size of the image
if(sizeX >= 960) {
launchFDWT97Kernel<192, 8>(in, out, sizeX, sizeY, kernelTime);
} else if (sizeX >= 480) {
launchFDWT97Kernel<128, 6>(in, out, sizeX, sizeY, kernelTime);
} else {
launchFDWT97Kernel<64, 6>(in, out, sizeX, sizeY, kernelTime);
}
// if this was not the last level, continue recursively with other levels
if(levels > 1) {
// copy output's LL band back into input buffer
const int llSizeX = divRndUp(sizeX, 2);
const int llSizeY = divRndUp(sizeY, 2);
memCopy(in, out, llSizeX, llSizeY);
// run remaining levels of FDWT
kernelTime += fdwt97(in, out, llSizeX, llSizeY, levels - 1);
}
return kernelTime;
}
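// --- Editor's note (illustrative, not part of the original source) ---
// A typical call, assuming d_in/d_out are device buffers holding an image
// normalized to [-0.5, 0.5] as documented above (hypothetical names):
// float seconds = fdwt97(d_in, d_out, 1920, 1080, 3);
// Each extra level recurses on the LL band, whose sides are halved with
// divRndUp, so level 2 runs on 960x540 and level 3 on 480x270.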
} // end of namespace dwt_cuda
| ba86c420cfaad44a95f4d84b4d3d138442d65f18.cu | ///
/// @file fdwt97.cu
/// @brief CUDA implementation of forward 9/7 2D DWT.
/// @author Martin Jirman ([email protected])
/// @date 2011-01-20 13:18
///
///
/// Copyright (c) 2011 Martin Jirman
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright
/// notice, this list of conditions and the following disclaimer in the
/// documentation and/or other materials provided with the distribution.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
///
#include "cudacommon.h"
#include "common.h"
#include "transform_buffer.h"
#include "io.h"
namespace dwt_cuda {
/// Wraps a buffer and methods for computing 9/7 FDWT with sliding window
/// of specified size. Template arguments specify this size.
/// @tparam WIN_SIZE_X width of sliding window
/// @tparam WIN_SIZE_Y height of sliding window
template <int WIN_SIZE_X, int WIN_SIZE_Y>
class FDWT97 {
private:
/// Type of shared memory buffer used for 9/7 DWT.
typedef TransformBuffer<float, WIN_SIZE_X, WIN_SIZE_Y + 7, 4> FDWT97Buffer;
/// Actual shared buffer used for forward 9/7 DWT.
FDWT97Buffer buffer;
/// Difference of indices of two vertically neighboring items in buffer.
enum { STRIDE = FDWT97Buffer::VERTICAL_STRIDE };
/// One thread's info about loading input image
/// @tparam CHECKED true if loader should check for image boundaries
template <bool CHECKED>
struct FDWT97ColumnLoadingInfo {
/// Loader of pixels from some input image.
VerticalDWTPixelLoader<float, CHECKED> loader;
/// Offset of column loaded by loader. (Offset in shared buffer.)
int offset;
};
/// Horizontal 9/7 FDWT on specified lines of transform buffer.
/// @param lines number of lines to be transformed
/// @param firstLine index of the first line to be transformed
__device__ void horizontalFDWT97(const int lines, const int firstLine) {
__syncthreads();
buffer.forEachHorizontalOdd(firstLine, lines, AddScaledSum(f97Predict1));
__syncthreads();
buffer.forEachHorizontalEven(firstLine, lines, AddScaledSum(f97Update1));
__syncthreads();
buffer.forEachHorizontalOdd(firstLine, lines, AddScaledSum(f97Predict2));
__syncthreads();
buffer.forEachHorizontalEven(firstLine, lines, AddScaledSum(f97Update2));
__syncthreads();
buffer.scaleHorizontal(scale97Div, scale97Mul, firstLine, lines);
__syncthreads();
}
/// Initializes one column of shared transform buffer with 7 input pixels.
/// Those 7 pixels will not be transformed. Also initializes given loader.
/// @tparam CHECKED true if loader should check for image boundaries
/// @param column (uninitialized) object for loading input pixels
/// @param columnIndex index (not offset!) of the column to be loaded
/// (relative to threadblock's first column)
/// @param input pointer to input image in GPU memory
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param firstY index of first row to be loaded from image
template <bool CHECKED>
__device__ void initColumn(FDWT97ColumnLoadingInfo<CHECKED> & column,
const int columnIndex, const float * const input,
const int sizeX, const int sizeY,
const int firstY) {
// get offset of the column with index 'columnIndex'
column.offset = buffer.getColumnOffset(columnIndex);
// x-coordinate of the first pixel to be loaded by given loader
const int firstX = blockIdx.x * WIN_SIZE_X + columnIndex;
if(blockIdx.y == 0) {
// topmost block - apply mirroring rules when loading first 7 rows
column.loader.init(sizeX, sizeY, firstX, firstY);
// load pixels in mirrored way
buffer[column.offset + 4 * STRIDE] = column.loader.loadFrom(input);
buffer[column.offset + 3 * STRIDE] =
buffer[column.offset + 5 * STRIDE] = column.loader.loadFrom(input);
buffer[column.offset + 2 * STRIDE] =
buffer[column.offset + 6 * STRIDE] = column.loader.loadFrom(input);
buffer[column.offset + 1 * STRIDE] = column.loader.loadFrom(input);
buffer[column.offset + 0 * STRIDE] = column.loader.loadFrom(input);
// reinitialize loader to start with pixel #3 again
column.loader.init(sizeX, sizeY, firstX, firstY + 3);
} else {
// non-topmost row - regular loading:
column.loader.init(sizeX, sizeY, firstX, firstY - 4);
// load 7 rows into the transform buffer
for(int i = 0; i < 7; i++) {
buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input);
}
}
// Now, the next pixel, which will be loaded by loader, is pixel #3.
}
/// Loads another WIN_SIZE_Y pixels into given column using given loader.
/// @tparam CHECKED true if loader should check for image boundaries
/// @param input input image to load from
/// @param column loader and offset of loaded column in shared buffer
template <bool CHECKED>
inline __device__ void loadWindowIntoColumn(const float * const input,
FDWT97ColumnLoadingInfo<CHECKED> & column) {
for(int i = 7; i < (7 + WIN_SIZE_Y); i++) {
buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input);
}
}
/// Main GPU 9/7 FDWT entry point.
/// @tparam CHECK_LOADS true if boundaries should be checked when loading
/// @tparam CHECK_WRITES true if boundaries should be checked when writing
/// @param in input image
/// @param out output buffer
/// @param sizeX width of the input image
/// @param sizeY height of the input image
/// @param winSteps number of steps of sliding window
template <bool CHECK_LOADS, bool CHECK_WRITES>
__device__ void transform(const float * const in, float * const out,
const int sizeX, const int sizeY,
const int winSteps) {
// info about columns loaded by this thread: one main column and possibly
// one boundary column. (Only some threads load some boundary column.)
FDWT97ColumnLoadingInfo<CHECK_LOADS> loadedColumn;
FDWT97ColumnLoadingInfo<CHECK_LOADS> boundaryColumn;
// Initialize first 7 lines of transform buffer.
const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps;
initColumn(loadedColumn, threadIdx.x, in, sizeX, sizeY, firstY);
// Some threads initialize boundary columns.
boundaryColumn.offset = 0;
boundaryColumn.loader.clear();
if(threadIdx.x < 7) {
// each thread among first 7 ones gets index of one of boundary columns
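// (threads 0-2 take the 3 columns just to the right of the window, indices
// WIN_SIZE_X..WIN_SIZE_X+2; threads 3-6 take the 4 columns just to the left,
// indices -4..-1)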
const int colId = threadIdx.x + ((threadIdx.x < 3) ? WIN_SIZE_X : -7);
// Thread initializes offset of the boundary column (in shared buffer),
// first 7 pixels of the column and a loader for this column.
initColumn(boundaryColumn, colId, in, sizeX, sizeY, firstY);
}
// horizontally transform first 7 rows in all columns
horizontalFDWT97(7, 0);
// Index of column handled by this thread. (First half of threads handle
// even columns and others handle odd columns.)
const int outColumnIndex = parityIdx<WIN_SIZE_X>();
// writer of output linear bands - initialize it
const int firstX = blockIdx.x * WIN_SIZE_X + outColumnIndex;
VerticalDWTBandWriter<float, CHECK_WRITES> writer;
writer.init(sizeX, sizeY, firstX, firstY);
// transform buffer offset of column transformed and saved by this thread
const int outColumnOffset = buffer.getColumnOffset(outColumnIndex);
// (Each iteration of this loop assumes that first 7 rows of transform
// buffer are already loaded with horizontally transformed coefficients.)
for(int w = 0; w < winSteps; w++) {
// Load another WIN_SIZE_Y lines of thread's column into the buffer.
loadWindowIntoColumn(in, loadedColumn);
// some threads also load boundary columns
if(threadIdx.x < 7) {
loadWindowIntoColumn(in, boundaryColumn);
}
// horizontally transform all newly loaded lines
horizontalFDWT97(WIN_SIZE_Y, 7);
// Using 7 registers, remember current values of last 7 rows of
// transform buffer. These rows are transformed horizontally only
// and will be used in next iteration.
float last7Lines[7];
for(int i = 0; i < 7; i++) {
last7Lines[i] = buffer[outColumnOffset + (WIN_SIZE_Y + i) * STRIDE];
}
// vertically transform all central columns (do not scale yet)
buffer.forEachVerticalOdd(outColumnOffset, AddScaledSum(f97Predict1));
buffer.forEachVerticalEven(outColumnOffset, AddScaledSum(f97Update1));
buffer.forEachVerticalOdd(outColumnOffset, AddScaledSum(f97Predict2));
buffer.forEachVerticalEven(outColumnOffset, AddScaledSum(f97Update2));
// Save all results of current window. Results are in transform buffer
// at rows from #4 to #(4 + WIN_SIZE_Y). Other rows are invalid now.
// (They only served as a boundary for vertical FDWT.)
for(int i = 4; i < (4 + WIN_SIZE_Y); i += 2) {
const int index = outColumnOffset + i * STRIDE;
// Write low coefficients from column into low band ...
writer.writeLowInto(out, buffer[index] * scale97Div);
// ... and high coefficients into the high band.
writer.writeHighInto(out, buffer[index + STRIDE] * scale97Mul);
}
// Use last 7 remembered lines as first 7 lines for next iteration.
// As expected, these lines are already horizontally transformed.
for(int i = 0; i < 7; i++) {
buffer[outColumnOffset + i * STRIDE] = last7Lines[i];
}
// Wait for all writing threads before proceeding to loading new
// pixels in next iteration. (Not to overwrite those which
// are not written yet.)
__syncthreads();
}
}
public:
/// Runs one of the specialized variants of 9/7 FDWT according to the distance
/// of the processed pixels from the image boundary. Some variants do not check
/// for the boundary and are thus slightly faster.
/// @param input input image
/// @param output output buffer
/// @param sx width of the input image
/// @param sy height of the input image
/// @param steps number of steps of sliding window
__device__ static void run(const float * const input, float * const output,
const int sx, const int sy, const int steps) {
// object with transform buffer in shared memory
__shared__ FDWT97<WIN_SIZE_X, WIN_SIZE_Y> fdwt97;
// Compute limits of this threadblock's block of pixels and use them to
// determine, whether this threadblock will have to deal with boundary.
// (3 in next expressions is for radius of impulse response of 9/7 FDWT.)
const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 3;
const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 3;
const bool atRightBoudary = maxX >= sx;
const bool atBottomBoudary = maxY >= sy;
// Select specialized version of code according to distance of this
// threadblock's pixels from image boundary.
if(atBottomBoudary) {
// near bottom boundary => check both writing and reading
fdwt97.transform<true, true>(input, output, sx, sy, steps);
} else if(atRightBoudary) {
// near right boundary only => check writing only
fdwt97.transform<false, true>(input, output, sx, sy, steps);
} else {
// no nearby boundary => check nothing
fdwt97.transform<false, false>(input, output, sx, sy, steps);
}
}
}; // end of class FDWT97
/// Main GPU 9/7 FDWT entry point.
/// @param input input image
/// @param output output buffer
/// @param sx width of the input image
/// @param sy height of the input image
/// @param steps number of steps of sliding window
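/// (The kernel is launched with WIN_SX threads per block - one thread per column of
/// the sliding window. The second __launch_bounds__ argument below requests as many
/// resident blocks per SM as transform buffers fit into shared memory, capped at 8.)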
template <int WIN_SX, int WIN_SY>
__launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(FDWT97<WIN_SX, WIN_SY>), 8))
__global__ void fdwt97Kernel(const float * const input, float * const output,
const int sx, const int sy, const int steps) {
// Excuse me, dear reader of this code - this call has to be here. If you
// try to simply put the contents of the following method right here, the CUDA
// compiler (version 3.2) will spit out tons of nonsensical error messages ...
// Hope they will not break it even more in future releases.
FDWT97<WIN_SX, WIN_SY>::run(input, output, sx, sy, steps);
}
/// Computes the optimal number of sliding-window steps and the number of
/// threadblocks, and then launches the 9/7 FDWT kernel.
/// @tparam WIN_SX width of sliding window
/// @tparam WIN_SY height of sliding window
/// @param in input image
/// @param out output buffer
/// @param sx width of the input image
/// @param sy height of the input image
/// @param kernelTime accumulator to which the measured kernel time (in seconds) is added
template <int WIN_SX, int WIN_SY>
void launchFDWT97Kernel (float * in, float * out, int sx, int sy, float &kernelTime) {
// compute optimal number of steps of each sliding window
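// (divRndUp with 15 * WIN_SY appears to target roughly 15 threadblocks along the
// y-axis, each covering WIN_SY * steps rows of the image.)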
const int steps = divRndUp(sy, 15 * WIN_SY);
// prepare grid size
dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps));
// timing events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
// run kernel, possibly measure time and finally check the call
cudaEventRecord(start, 0);
fdwt97Kernel<WIN_SX, WIN_SY><<<gSize, WIN_SX>>>(in, out, sx, sy, steps);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
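// cudaEventElapsedTime reports milliseconds; convert to seconds before accumulating.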
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
}
/// Forward 9/7 2D DWT. See common rules (dwt.h) for more details.
/// @param in Input DWT coefficients. Should be normalized (in range
/// [-0.5, 0.5]). Will not be preserved (will be overwritten).
/// @param out output buffer on GPU - format specified in common rules
/// @param sizeX width of input image (in pixels)
/// @param sizeY height of input image (in pixels)
/// @param levels number of recursive DWT levels
/// @return total measured kernel time (in seconds) across all levels
float fdwt97(float * in, float * out, int sizeX, int sizeY, int levels) {
float kernelTime = 0;
// select right width of kernel for the size of the image
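// (Wider windows presumably keep the 192- and 128-thread blocks fully utilized on
// wide images, while the narrower 64-column window avoids idle threads on small ones.)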
if(sizeX >= 960) {
launchFDWT97Kernel<192, 8>(in, out, sizeX, sizeY, kernelTime);
} else if (sizeX >= 480) {
launchFDWT97Kernel<128, 6>(in, out, sizeX, sizeY, kernelTime);
} else {
launchFDWT97Kernel<64, 6>(in, out, sizeX, sizeY, kernelTime);
}
// if this was not the last level, continue recursively with other levels
if(levels > 1) {
// copy output's LL band back into input buffer
const int llSizeX = divRndUp(sizeX, 2);
const int llSizeY = divRndUp(sizeY, 2);
memCopy(in, out, llSizeX, llSizeY);
// run remaining levels of FDWT
kernelTime += fdwt97(in, out, llSizeX, llSizeY, levels - 1);
}
return kernelTime;
}
} // end of namespace dwt_cuda
|
96e2a3e1b45c6d30ddca7135f488e696348d10da.hip | // !!! This is a file automatically generated by hipify!!!
//
// GALS - Before compiling, update the user-input section at the beginning of main and all of the functions in Initializer.h
//
// Created by Raunak Bardia on 12/10/17.
//
// DISCLAIMER:
// Use the indexes carefully
// First index of array represents movement along y-direction because it represents rows
// Second index of array represents movement along x-direction because it represents columns
//
// Implementing GALS for a given initial level set function
// in a specified velocity field for a grid of cells
//
// Given -
// 1. Defining function at t = 0 which implies that phi and psi values are available for all node points at t = 0
// 2. Given velocity for the complete domain at all times
//
// All required data is stored in separate 2D matrices of phi, psix, psiy and psixy
// Boundary Condition grad(velocity).n > 0
//
// THIS IMPLEMENTATION WON'T WORK IF THE GRID IS SMALLER THAN (2 X 2)
#include <iostream>
#include <iomanip>
#include <math.h>
#include <stdio.h>
#include <fstream>
#include <sys/time.h>
#include <time.h>
#include <string.h>
#include <vector>
#include <tuple>
#include <hip/hip_runtime.h>
#include <chrono>
#include "Allocation.h"
//#include "InitializeLevelSet.h"
//Including Kernel
#include "AdvectionPointCalcsCUDA.cu"
#include "VortexVelocityCUDA.cu"
// y direction is the first index of array, x direction is the second index of array
using namespace std;
int main(){
/* UPDATE ALL THE FOLLOWING VALUES */
double xlim1 = 0.0; //Lower limit on x-axis
double xlim2 = 1.0; //Upper limit on x-axis
unsigned int nx = 32; //Number of nodes in x-direction INCLUDING THE EXTREME VALUES
double ylim1 = 0.0; //Lower limit on y-axis
double ylim2 = 1.0; //Upper limit on y-axis
unsigned int ny = 32; //Number of nodes INCLUDING THE EXTREME VALUES
double dt = 0.5 * 1.0/512.0; //Length of time step
double Tfinal = 8.0; //Total time period for the simulation
unsigned int option = 2; //Option - if you need animation initialize at 1 else initialize at 2
unsigned int printstep = 256; //How frequently do you want to store the images (every nth time step)
char psischeme[] = "SuperConsistent"; //'SuperConsistent' or 'Heuns'
char backtrace_scheme[] = "RK3" ; //'Euler' or 'RK3'
double T_period = 8.0; //Period of the velocity field
unsigned int TileSize = 8;
//---------------------------------------------------------------------------------------------------------
//MAKE SURE THAT YOU HAVE ENOUGH MEMORY SPACE IF YOU ARE STORING A LOT OF TIME STEP VALUES BECAUSE IT STORES ACROSS GRID POINTS FOR EACH PRINTSTEP
/* USER UPDATE OVER */
unsigned long gridmemory = nx * ny * sizeof(double);
unsigned long gridmemoryint = nx * ny * sizeof(unsigned int);
unsigned int n = Tfinal/dt; //Number of time steps
if(option != 1)
printstep = n;
dim3 dimBlock(TileSize, TileSize);
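// Note: the integer division below assumes nx and ny are multiples of TileSize;
// otherwise the leftover rows/columns of nodes would not be covered by any block.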
dim3 dimGrid(nx/dimBlock.x, ny/dimBlock.y);
// Node Locations
double dx = (xlim2 - xlim1)/(nx - 1);
double dy = (ylim2 - ylim1)/(ny - 1);
double* x = (double*) malloc(nx * sizeof(double));
double* y = (double*) malloc(ny * sizeof(double));
gridnodes(x,y,xlim1,ylim1,dx,dy,nx,ny);
double *devicex, *devicey;
// allocate device memory for x and y
hipMalloc((void**)&devicex,nx * sizeof(double)); // Allocating GPU memory for the x-node values
hipMalloc((void**)&devicey,ny * sizeof(double)); // Allocating GPU memory for the y-node values
// Copy data from host to GPU
hipMemcpy(devicex, x, nx * sizeof(double), hipMemcpyHostToDevice); // Writing to device memory
hipMemcpy(devicey, y, ny * sizeof(double), hipMemcpyHostToDevice); // Writing to device memory
// level set matrices
double* mphi = (double*) malloc(gridmemory);
double* mpsix = (double*) malloc(gridmemory);
double* mpsiy = (double*) malloc(gridmemory);
double* mpsixy = (double*) malloc(gridmemory);
double *masterdphi, *masterdpsix, *masterdpsiy, *masterdpsixy;
// allocate device memory for the master copies of the level set grids
hipMalloc((void**)&masterdphi,gridmemory); // Allocating GPU memory for phi
hipMalloc((void**)&masterdpsix,gridmemory); // Allocating GPU memory for psix
hipMalloc((void**)&masterdpsiy,gridmemory); // Allocating GPU memory for psiy
hipMalloc((void**)&masterdpsixy,gridmemory); // Allocating GPU memory for psixy
double *dphi, *dpsix, *dpsiy, *dpsixy;
// allocate device memory for the working copies of the level set grids
hipMalloc((void**)&dphi,gridmemory); // Allocating GPU memory for phi
hipMalloc((void**)&dpsix,gridmemory); // Allocating GPU memory for psix
hipMalloc((void**)&dpsiy,gridmemory); // Allocating GPU memory for psiy
hipMalloc((void**)&dpsixy,gridmemory); // Allocating GPU memory for psixy
// Initializing at t = 0
hipLaunchKernelGGL(( allocate_levelset_matrices_CUDA), dim3(dimGrid), dim3(dimBlock), 0, 0, masterdphi, masterdpsix, masterdpsiy, masterdpsixy, devicex, devicey, nx, ny); //Initializing level set matrices
hipLaunchKernelGGL(( allocate_levelset_matrices_CUDA), dim3(dimGrid), dim3(dimBlock), 0, 0, dphi, dpsix, dpsiy, dpsixy, devicex, devicey, nx, ny); //Initializing level set matrices
// Initializing at t = 0
allocate_levelset_matrices(mphi,mpsix,mpsiy,mpsixy,x,y,nx,ny); //Initializing level set matrices
// Removing existing files with these names if any
/* remove("phi.txt");
remove("psix.txt");
remove("psiy.txt");
remove("psixy.txt");
remove("details.txt");
remove("Velocity_x.txt");
remove("Velocity_y.txt");
fileprint(mphi,mpsix,mpsiy,mpsixy,nx,ny,x,y,0.0,T_period);
ofstream details;
details.open("details.txt", ios::out | ios::app);
details<< nx << "," << ny << "," << std::fixed << std::setprecision(10) << dx << "," << dy << "," << xlim1 << "," << xlim2 << "," << ylim1 << "," << ylim2 << "," << n << "," << dt << "," << printstep;
details.close();
*/
///*
// TIME STEPPING LOOP
// If only the initial and final profiles are needed
// This section will be deleted after a proper profiler is installed on my computer - Raunak Bardia
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
float t_calcpts=0.0,t_findpts=0.0,t_update=0.0,t_mixed=0.0,t_copy=0.0,t_transfer=0.0;
auto tbegin = chrono::high_resolution_clock::now();
//
for(unsigned int t = 0; t < n; t++){
double *dxadv, *dyadv;
// allocate device memory for the advection departure points
hipMalloc((void**)&dxadv,gridmemory); // Allocating GPU memory for the x-coordinates of the advection points
hipMalloc((void**)&dyadv,gridmemory); // Allocating GPU memory for the y-coordinates of the advection points
unsigned int *dcellx, *dcelly,*dtracker;
// allocate device memory for integer grids
hipMalloc((void**)&dcellx,gridmemoryint); // Allocating GPU memory for the cell x-indices
hipMalloc((void**)&dcelly,gridmemoryint); // Allocating GPU memory for the cell y-indices
hipMalloc((void**)&dtracker,gridmemoryint); // Allocating GPU memory for the tracker flags
float tempt = 0.0;
// Find the point from which advection occurs at this time step
hipEventRecord(startEvent,0);
hipLaunchKernelGGL(( advection_point_cuda), dim3(dimGrid),dim3(dimBlock), 0, 0, devicex,devicey,dxadv,dyadv,nx,t,dt,T_period,TileSize);
hipEventRecord(stopEvent,0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&tempt, startEvent, stopEvent);
t_calcpts += tempt;
tempt = 0.0;
// Find the cell in which those advection points lie
hipEventRecord(startEvent,0);
hipLaunchKernelGGL(( find_advection_point_location_cuda), dim3(dimGrid),dim3(dimBlock), 0, 0, devicex,devicey,dxadv,dyadv,nx,ny,dcellx,dcelly,dtracker,xlim1,xlim2,ylim1,ylim2,TileSize);
hipEventRecord(stopEvent,0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&tempt, startEvent, stopEvent);
t_findpts += tempt;
tempt = 0.0;
// Update the level set values
hipEventRecord(startEvent,0);
hipLaunchKernelGGL(( update_levelset_data_cuda), dim3(dimGrid),dim3(dimBlock), 0, 0, devicex, devicey, dxadv, dyadv, dcellx, dcelly, dtracker, t, dt, dphi, dpsix, dpsiy, dpsixy, masterdphi, masterdpsix, masterdpsiy,psischeme,backtrace_scheme,T_period,nx,ny,TileSize);
hipEventRecord(stopEvent,0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&tempt, startEvent, stopEvent);
t_update += tempt;
tempt = 0.0;
// Create another copy to preserve data which gets modified on the fly in next loop
hipEventRecord(startEvent,0);
hipLaunchKernelGGL(( devicetodevicecopy), dim3(dimGrid),dim3(dimBlock), 0, 0, dphi,dpsix,dpsiy,masterdphi,masterdpsix,masterdpsiy,nx,TileSize);
hipEventRecord(stopEvent,0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&tempt, startEvent, stopEvent);
t_copy += tempt;
tempt = 0.0;
// Update the mixed derivatives now for the remaining grid points
hipEventRecord(startEvent,0);
hipLaunchKernelGGL(( update_mixed_derivatives), dim3(dimGrid),dim3(dimBlock), 0, 0, dpsix, dpsiy, dpsixy, nx, ny, dx, dy,TileSize);
hipEventRecord(stopEvent,0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&tempt, startEvent, stopEvent);
t_mixed += tempt;
hipDeviceSynchronize();
//---------------------------------------------------------------------------------------------------------
// Feeding phi, psix, psiy and psixy values in their respective files
/* if((t+1) % printstep == 0)
{
tempt = 0.0;
hipEventRecord(startEvent,0);
hipMemcpy(mphi, masterdphi, gridmemory, hipMemcpyDeviceToHost); // Writing back to host memory
hipMemcpy(mpsix, masterdpsix, gridmemory, hipMemcpyDeviceToHost); // Writing back to host memory
hipMemcpy(mpsiy, masterdpsiy, gridmemory, hipMemcpyDeviceToHost); // Writing back to host memory
hipEventRecord(stopEvent,0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&tempt, startEvent, stopEvent);
t_transfer += tempt;
//hipMemcpy(mpsixy, masterdpsixy, gridmemoryint, hipMemcpyDeviceToHost); // Writing back to host memory
fileprint(mphi,mpsix,mpsiy,mpsixy,nx,ny,x,y,(t+1)*dt,T_period);
}
cout<< t+1;
cout<< " Time Step Completed" <<'\n';
*/
//---------------------------------------------------------------------------------------------------------
//xadv.clear();
//yadv.clear();
//tracker.clear();
//cellx.clear();
//celly.clear();
hipFree(dxadv);
hipFree(dyadv);
hipFree(dcellx);
hipFree(dcelly);
hipFree(dtracker);
} // end of time marching loop
//*/
auto tend = chrono::high_resolution_clock::now();
float duration = chrono::duration_cast<chrono::nanoseconds>(tend-tbegin).count();
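// 'duration' is in nanoseconds; the factor below converts it to milliseconds,
// matching the units reported by the event timers above.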
duration = duration * pow(10.0,-6);
cout << "Time taken for calculation of advection points = " << t_calcpts << '\n';
cout << "Time taken for finding location of advection points in the grid = " << t_findpts << '\n';
cout << "Time taken for hermite Update = " << t_update << '\n';
cout << "Time taken for copying the matrix = " << t_copy << '\n';
cout << "Time taken for calculation of mixed derivatives = " << t_mixed << '\n';
cout << "Time taken for transfer of level set data = " << t_transfer << '\n';
cout << "Total Duration of the Time Loop = " << duration << endl;
return 0;
}
| 96e2a3e1b45c6d30ddca7135f488e696348d10da.cu | //
// GALS - Before compiling, update the user-input section at the beginning of main and all of the functions in Initializer.h
//
// Created by Raunak Bardia on 12/10/17.
//
// DISCLAIMER:
// Use the indexes carefully
// First index of array represents movement along y-direction because it represents rows
// Second index of array represents movement along x-direction because it represents columns
//
// Implementing GALS for a given initial level set function
// in a specified velocity field for a grid of cells
//
// Given -
// 1. Defining function at t = 0 which implies that phi and psi values are available for all node points at t = 0
// 2. Given velocity for the complete domain at all times
//
// All required data is stored in separate 2D matrices of phi, psix, psiy and psixy
// Boundary Condition grad(velocity).n > 0
//
// THIS IMPLEMENTATION WON'T WORK IF THE GRID IS SMALLER THAN (2 X 2)
#include <iostream>
#include <iomanip>
#include <math.h>
#include <stdio.h>
#include <fstream>
#include <sys/time.h>
#include <time.h>
#include <string.h>
#include <vector>
#include <tuple>
#include <cuda.h>
#include <chrono>
#include "Allocation.h"
//#include "InitializeLevelSet.h"
//Including Kernel
#include "AdvectionPointCalcsCUDA.cu"
#include "VortexVelocityCUDA.cu"
// y direction is the first index of array, x direction is the second index of array
using namespace std;
int main(){
/* UPDATE ALL THE FOLLOWING VALUES */
double xlim1 = 0.0; //Lower limit on x-axis
double xlim2 = 1.0; //Upper limit on x-axis
unsigned int nx = 32; //Number of nodes in x-direction INCLUDING THE EXTREME VALUES
double ylim1 = 0.0; //Lower limit on y-axis
double ylim2 = 1.0; //Upper limit on y-axis
unsigned int ny = 32; //Number of nodes INCLUDING THE EXTREME VALUES
double dt = 0.5 * 1.0/512.0; //Length of time step
double Tfinal = 8.0; //Total time period for the simulation
unsigned int option = 2; //Option - if you need animation initialize at 1 else initialize at 2
unsigned int printstep = 256; //How frequently do you want to store the images (every nth time step)
char psischeme[] = "SuperConsistent"; //'SuperConsistent' or 'Heuns'
char backtrace_scheme[] = "RK3" ; //'Euler' or 'RK3'
double T_period = 8.0; //Period of the velocity field
unsigned int TileSize = 8;
//---------------------------------------------------------------------------------------------------------
//MAKE SURE THAT YOU HAVE ENOUGH MEMORY SPACE IF YOU ARE STORING A LOT OF TIME STEP VALUES BECAUSE IT STORES ACROSS GRID POINTS FOR EACH PRINTSTEP
/* USER UPDATE OVER */
unsigned long gridmemory = nx * ny * sizeof(double);
unsigned long gridmemoryint = nx * ny * sizeof(unsigned int);
unsigned int n = Tfinal/dt; //Number of time steps
if(option != 1)
printstep = n;
dim3 dimBlock(TileSize, TileSize);
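// Note: the integer division below assumes nx and ny are multiples of TileSize;
// otherwise the leftover rows/columns of nodes would not be covered by any block.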
dim3 dimGrid(nx/dimBlock.x, ny/dimBlock.y);
// Node Locations
double dx = (xlim2 - xlim1)/(nx - 1);
double dy = (ylim2 - ylim1)/(ny - 1);
double* x = (double*) malloc(nx * sizeof(double));
double* y = (double*) malloc(ny * sizeof(double));
gridnodes(x,y,xlim1,ylim1,dx,dy,nx,ny);
double *devicex, *devicey;
// allocate device memory for x and y
cudaMalloc((void**)&devicex,nx * sizeof(double)); // Allocating GPU memory for the x-node values
cudaMalloc((void**)&devicey,ny * sizeof(double)); // Allocating GPU memory for the y-node values
// Copy data from host to GPU
cudaMemcpy(devicex, x, nx * sizeof(double), cudaMemcpyHostToDevice); // Writing to device memory
cudaMemcpy(devicey, y, ny * sizeof(double), cudaMemcpyHostToDevice); // Writing to device memory
// level set matrices
double* mphi = (double*) malloc(gridmemory);
double* mpsix = (double*) malloc(gridmemory);
double* mpsiy = (double*) malloc(gridmemory);
double* mpsixy = (double*) malloc(gridmemory);
double *masterdphi, *masterdpsix, *masterdpsiy, *masterdpsixy;
// allocate device memory for the master copies of the level set grids
cudaMalloc((void**)&masterdphi,gridmemory); // Allocating GPU memory for phi
cudaMalloc((void**)&masterdpsix,gridmemory); // Allocating GPU memory for psix
cudaMalloc((void**)&masterdpsiy,gridmemory); // Allocating GPU memory for psiy
cudaMalloc((void**)&masterdpsixy,gridmemory); // Allocating GPU memory for psixy
double *dphi, *dpsix, *dpsiy, *dpsixy;
// allocate device memory for the working copies of the level set grids
cudaMalloc((void**)&dphi,gridmemory); // Allocating GPU memory for phi
cudaMalloc((void**)&dpsix,gridmemory); // Allocating GPU memory for psix
cudaMalloc((void**)&dpsiy,gridmemory); // Allocating GPU memory for psiy
cudaMalloc((void**)&dpsixy,gridmemory); // Allocating GPU memory for psixy
// Initializing at t = 0
allocate_levelset_matrices_CUDA<<<dimGrid, dimBlock>>>(masterdphi, masterdpsix, masterdpsiy, masterdpsixy, devicex, devicey, nx, ny); //Initializing level set matrices
allocate_levelset_matrices_CUDA<<<dimGrid, dimBlock>>>(dphi, dpsix, dpsiy, dpsixy, devicex, devicey, nx, ny); //Initializing level set matrices
// Initializing at t = 0
allocate_levelset_matrices(mphi,mpsix,mpsiy,mpsixy,x,y,nx,ny); //Initializing level set matrices
// Removing existing files with these names if any
/* remove("phi.txt");
remove("psix.txt");
remove("psiy.txt");
remove("psixy.txt");
remove("details.txt");
remove("Velocity_x.txt");
remove("Velocity_y.txt");
fileprint(mphi,mpsix,mpsiy,mpsixy,nx,ny,x,y,0.0,T_period);
ofstream details;
details.open("details.txt", ios::out | ios::app);
details<< nx << "," << ny << "," << std::fixed << std::setprecision(10) << dx << "," << dy << "," << xlim1 << "," << xlim2 << "," << ylim1 << "," << ylim2 << "," << n << "," << dt << "," << printstep;
details.close();
*/
///*
// TIME STEPPING LOOP
// If only the initial and final profiles are needed
// This section will be deleted after a proper profiler is installed on my computer - Raunak Bardia
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
float t_calcpts=0.0,t_findpts=0.0,t_update=0.0,t_mixed=0.0,t_copy=0.0,t_transfer=0.0;
auto tbegin = chrono::high_resolution_clock::now();
//
for(unsigned int t = 0; t < n; t++){
double *dxadv, *dyadv;
// allocate device memory for the advection departure points
cudaMalloc((void**)&dxadv,gridmemory); // Allocating GPU memory for the x-coordinates of the advection points
cudaMalloc((void**)&dyadv,gridmemory); // Allocating GPU memory for the y-coordinates of the advection points
unsigned int *dcellx, *dcelly,*dtracker;
// allocate device memory for integer grids
cudaMalloc((void**)&dcellx,gridmemoryint); // Allocating GPU memory for the cell x-indices
cudaMalloc((void**)&dcelly,gridmemoryint); // Allocating GPU memory for the cell y-indices
cudaMalloc((void**)&dtracker,gridmemoryint); // Allocating GPU memory for the tracker flags
float tempt = 0.0;
// Find the point from which advection occurs at this time step
cudaEventRecord(startEvent,0);
advection_point_cuda<<<dimGrid,dimBlock>>>(devicex,devicey,dxadv,dyadv,nx,t,dt,T_period,TileSize);
cudaEventRecord(stopEvent,0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&tempt, startEvent, stopEvent);
t_calcpts += tempt;
tempt = 0.0;
// Find the cell in which those advection points lie
cudaEventRecord(startEvent,0);
find_advection_point_location_cuda<<<dimGrid,dimBlock>>>(devicex,devicey,dxadv,dyadv,nx,ny,dcellx,dcelly,dtracker,xlim1,xlim2,ylim1,ylim2,TileSize);
cudaEventRecord(stopEvent,0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&tempt, startEvent, stopEvent);
t_findpts += tempt;
tempt = 0.0;
// Update the level set values
cudaEventRecord(startEvent,0);
update_levelset_data_cuda<<<dimGrid,dimBlock>>>(devicex, devicey, dxadv, dyadv, dcellx, dcelly, dtracker, t, dt, dphi, dpsix, dpsiy, dpsixy, masterdphi, masterdpsix, masterdpsiy,psischeme,backtrace_scheme,T_period,nx,ny,TileSize);
cudaEventRecord(stopEvent,0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&tempt, startEvent, stopEvent);
t_update += tempt;
tempt = 0.0;
// Create another copy to preserve data which gets modified on the fly in next loop
cudaEventRecord(startEvent,0);
devicetodevicecopy<<<dimGrid,dimBlock>>>(dphi,dpsix,dpsiy,masterdphi,masterdpsix,masterdpsiy,nx,TileSize);
cudaEventRecord(stopEvent,0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&tempt, startEvent, stopEvent);
t_copy += tempt;
tempt = 0.0;
// Update the mixed derivatives now for the remaining grid points
cudaEventRecord(startEvent,0);
update_mixed_derivatives<<<dimGrid,dimBlock>>>(dpsix, dpsiy, dpsixy, nx, ny, dx, dy,TileSize);
cudaEventRecord(stopEvent,0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&tempt, startEvent, stopEvent);
t_mixed += tempt;
cudaDeviceSynchronize();
//---------------------------------------------------------------------------------------------------------
// Feeding phi, psix, psiy and psixy values in their respective files
/* if((t+1) % printstep == 0)
{
tempt = 0.0;
cudaEventRecord(startEvent,0);
cudaMemcpy(mphi, masterdphi, gridmemory, cudaMemcpyDeviceToHost); // Writing back to host memory
cudaMemcpy(mpsix, masterdpsix, gridmemory, cudaMemcpyDeviceToHost); // Writing back to host memory
cudaMemcpy(mpsiy, masterdpsiy, gridmemory, cudaMemcpyDeviceToHost); // Writing back to host memory
cudaEventRecord(stopEvent,0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&tempt, startEvent, stopEvent);
t_transfer += tempt;
//cudaMemcpy(mpsixy, masterdpsixy, gridmemoryint, cudaMemcpyDeviceToHost); // Writing back to host memory
fileprint(mphi,mpsix,mpsiy,mpsixy,nx,ny,x,y,(t+1)*dt,T_period);
}
cout<< t+1;
cout<< " Time Step Completed" <<'\n';
*/
//---------------------------------------------------------------------------------------------------------
//xadv.clear();
//yadv.clear();
//tracker.clear();
//cellx.clear();
//celly.clear();
cudaFree(dxadv);
cudaFree(dyadv);
cudaFree(dcellx);
cudaFree(dcelly);
cudaFree(dtracker);
} // end of time marching loop
//*/
auto tend = chrono::high_resolution_clock::now();
float duration = chrono::duration_cast<chrono::nanoseconds>(tend-tbegin).count();
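// 'duration' is in nanoseconds; the factor below converts it to milliseconds,
// matching the units reported by the event timers above.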
duration = duration * pow(10.0,-6);
cout << "Time taken for calculation of advection points = " << t_calcpts << '\n';
cout << "Time taken for finding location of advection points in the grid = " << t_findpts << '\n';
cout << "Time taken for hermite Update = " << t_update << '\n';
cout << "Time taken for copying the matrix = " << t_copy << '\n';
cout << "Time taken for calculation of mixed derivatives = " << t_mixed << '\n';
cout << "Time taken for transfer of level set data = " << t_transfer << '\n';
cout << "Total Duration of the Time Loop = " << duration << endl;
return 0;
}
|
11ada997a573759dbb84c7fcfaae7cec0e1f7ea6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file proposal.cu
* \brief Proposal Operator
* \author Shaoqing Ren, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./proposal-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// scores are (b, anchor, h, w)
// workspace_proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template <typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const Dtype* scores,
Dtype* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template <typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template <typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (n, 5)
template <typename Dtype>
__global__ void FilterBoxKernel(const int count, const float min_size, Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template <typename Dtype>
__global__ void CopyScoreKernel(const int count, const Dtype* dets, Dtype* score, int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template <typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
__device__ inline float devIoU(float const* const a, float const* const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
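// nms_kernel computes a pairwise suppression mask on a (col_blocks x col_blocks) grid:
// each block compares one 64-box "row" chunk against one 64-box "column" chunk, and
// bit j of dev_mask[i * col_blocks + c] is set when box i and box (c * 64 + j)
// overlap by more than nms_overlap_thresh.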
__global__ void nms_kernel(const int n_boxes,
const float nms_overlap_thresh,
const float* dev_boxes,
uint64_t* dev_mask) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float* cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _nms(mshadow::Stream<gpu>* s,
const mshadow::Tensor<gpu, 2>& boxes,
const float nms_overlap_thresh,
const int rpn_post_nms_top_n,
int* keep,
int* num_out) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
float* boxes_dev = boxes.dptr_;
uint64_t* mask_dev = nullptr;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(uint64_t)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);
FRCNN_CUDA_CHECK(hipGetLastError());
std::vector<uint64_t> mask_host(boxes_num * col_blocks);
hipStream_t stream = mshadow::Stream<gpu>::GetStream(s);
FRCNN_CUDA_CHECK(hipMemcpyAsync(&mask_host[0],
mask_dev,
sizeof(uint64_t) * boxes_num * col_blocks,
hipMemcpyDeviceToHost,
stream));
FRCNN_CUDA_CHECK(hipStreamSynchronize(stream));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
int num_to_keep = 0;
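// Greedy suppression on the host: boxes arrive sorted by descending score, a box is
// kept only if no previously kept box has marked it in 'remv', and the mask row of
// each kept box is ORed into 'remv' to suppress everything it overlaps.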
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
if (num_to_keep >= rpn_post_nms_top_n)
break;
uint64_t* p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(hipFree(mask_dev));
}
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, 5); score (top_n, )
// count should be top_n (total anchors or proposals)
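// If fewer than 'count' proposals survive NMS (out_size < count), the output is padded
// by repeating the kept proposals.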
template <typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int* keep,
const int out_size,
Dtype* out,
Dtype* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = 0;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template <typename xpu>
class ProposalGPUOp : public Operator {
public:
explicit ProposalGPUOp(ProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext& ctx,
const std::vector<TBlob>& in_data,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& out_data,
const std::vector<TBlob>& aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1)
<< "Sorry, multiple images each device is not implemented.";
Stream<xpu>* s = ctx.get_stream<xpu>();
Shape<4> fg_scores_shape = Shape4(in_data[proposal::kClsProb].shape_[0],
in_data[proposal::kClsProb].shape_[1] / 2,
in_data[proposal::kClsProb].shape_[2],
in_data[proposal::kClsProb].shape_[3]);
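// The class-probability blob has 2 * num_anchors channels: the first half holds
// background scores, the second half foreground scores. Offsetting by
// fg_scores_shape.Size() points at the start of the foreground block.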
real_t* foreground_score_ptr =
in_data[proposal::kClsProb].dptr<real_t>() + fg_scores_shape.Size();
Tensor<xpu, 4> scores = Tensor<xpu, 4>(foreground_score_ptr, fg_scores_shape);
Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s);
int num_anchors = in_data[proposal::kClsProb].shape_[1] / 2;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = ::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = ::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.ndim() * param_.scales.ndim());
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor, param_.ratios, param_.scales, &anchors);
// Copy generated anchors to GPU
float* workspace_proposals_ptr = nullptr;
FRCNN_CUDA_CHECK(hipMalloc(&workspace_proposals_ptr, sizeof(float) * count * 5));
Tensor<xpu, 2> workspace_proposals(workspace_proposals_ptr, Shape2(count, 5));
hipStream_t stream = mshadow::Stream<gpu>::GetStream(s);
FRCNN_CUDA_CHECK(hipMemcpyAsync(workspace_proposals.dptr_,
&anchors[0],
sizeof(float) * anchors.size(),
hipMemcpyHostToDevice,
stream));
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
hipLaunchKernelGGL(( ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, count,
num_anchors,
height,
width,
param_.feature_stride,
scores.dptr_,
workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(hipGetLastError());
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(3);
FRCNN_CUDA_CHECK(hipMemcpyAsync(&cpu_im_info[0],
im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
hipMemcpyDeviceToHost,
stream));
FRCNN_CUDA_CHECK(hipStreamSynchronize(stream));
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
hipLaunchKernelGGL(( IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, count,
num_anchors,
height,
width,
real_height,
real_width,
cpu_im_info[0],
cpu_im_info[1],
workspace_proposals.dptr_,
bbox_deltas.dptr_,
workspace_proposals.dptr_);
} else {
hipLaunchKernelGGL(( BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, count,
num_anchors,
height,
width,
real_height,
real_width,
cpu_im_info[0],
cpu_im_info[1],
workspace_proposals.dptr_,
bbox_deltas.dptr_,
workspace_proposals.dptr_);
}
FRCNN_CUDA_CHECK(hipGetLastError());
// filter boxes with less than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
hipLaunchKernelGGL(( FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, param_.rpn_min_size * cpu_im_info[2], workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(hipGetLastError());
// Copy score to a continuous memory
float* score_ptr = nullptr;
FRCNN_CUDA_CHECK(hipMalloc(&score_ptr, sizeof(float) * count));
Tensor<xpu, 1> score(score_ptr, Shape1(count));
int* order_ptr = nullptr;
FRCNN_CUDA_CHECK(hipMalloc(&order_ptr, sizeof(int) * count));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count));
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
hipLaunchKernelGGL(( CopyScoreKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, workspace_proposals.dptr_, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(hipGetLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<real_t>());
FRCNN_CUDA_CHECK(hipGetLastError());
// Reorder proposals according to order
float* workspace_ordered_proposals_ptr = nullptr;
FRCNN_CUDA_CHECK(
hipMalloc(&workspace_ordered_proposals_ptr, sizeof(float) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
Shape2(rpn_pre_nms_top_n, 5));
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
hipLaunchKernelGGL(( ReorderProposalsKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, rpn_pre_nms_top_n,
workspace_proposals.dptr_,
order.dptr_,
workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(hipGetLastError());
FRCNN_CUDA_CHECK(hipFree(workspace_proposals_ptr));
FRCNN_CUDA_CHECK(hipFree(score_ptr));
FRCNN_CUDA_CHECK(hipFree(order_ptr));
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(
s, workspace_ordered_proposals, param_.threshold, rpn_post_nms_top_n, &_keep[0], &out_size);
// copy nms result to gpu
int* keep;
FRCNN_CUDA_CHECK(hipMalloc(&keep, sizeof(int) * _keep.size()));
FRCNN_CUDA_CHECK(hipMemcpyAsync(
keep, &_keep[0], sizeof(int) * _keep.size(), hipMemcpyHostToDevice, stream));
// copy results after nms
dimGrid.x = (param_.rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
hipLaunchKernelGGL(( PrepareOutput), dim3(dimGrid), dim3(dimBlock), 0, 0, param_.rpn_post_nms_top_n,
workspace_ordered_proposals.dptr_,
keep,
out_size,
out.dptr_,
out_score.dptr_);
FRCNN_CUDA_CHECK(hipGetLastError());
// free temporary memory
FRCNN_CUDA_CHECK(hipFree(keep));
FRCNN_CUDA_CHECK(hipFree(workspace_ordered_proposals_ptr));
}
virtual void Backward(const OpContext& ctx,
const std::vector<TBlob>& out_grad,
const std::vector<TBlob>& in_data,
const std::vector<TBlob>& out_data,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& in_grad,
const std::vector<TBlob>& aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu>* s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
ProposalParam param_;
}; // class ProposalGPUOp
template <>
Operator* CreateOp<gpu>(ProposalParam param) {
return new ProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
| 11ada997a573759dbb84c7fcfaae7cec0e1f7ea6.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file proposal.cu
* \brief Proposal Operator
* \author Shaoqing Ren, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include "../operator_common.h"
#include "../mshadow_op.h"
#include "./proposal-inl.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
// scores are (b, anchor, h, w)
// workspace_proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template <typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const Dtype* scores,
Dtype* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template <typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template <typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (n, 5)
template <typename Dtype>
__global__ void FilterBoxKernel(const int count, const float min_size, Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template <typename Dtype>
__global__ void CopyScoreKernel(const int count, const Dtype* dets, Dtype* score, int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template <typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
__device__ inline float devIoU(float const* const a, float const* const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
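// Worked example (illustrative numbers only): for a = (0, 0, 9, 9) and
// b = (5, 0, 14, 9) the intersection is 5 x 10 = 50 and Sa = Sb = 100, so
// devIoU returns 50 / (100 + 100 - 50) = 1/3. The +1 terms treat coordinates
// as inclusive pixel indices.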
__global__ void nms_kernel(const int n_boxes,
const float nms_overlap_thresh,
const float* dev_boxes,
uint64_t* dev_mask) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float* cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _nms(mshadow::Stream<gpu>* s,
const mshadow::Tensor<gpu, 2>& boxes,
const float nms_overlap_thresh,
const int rpn_post_nms_top_n,
int* keep,
int* num_out) {
const int threadsPerBlock = sizeof(uint64_t) * 8;
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
float* boxes_dev = boxes.dptr_;
uint64_t* mask_dev = nullptr;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(uint64_t)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);
FRCNN_CUDA_CHECK(cudaGetLastError());
std::vector<uint64_t> mask_host(boxes_num * col_blocks);
cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s);
FRCNN_CUDA_CHECK(cudaMemcpyAsync(&mask_host[0],
mask_dev,
sizeof(uint64_t) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost,
stream));
FRCNN_CUDA_CHECK(cudaStreamSynchronize(stream));
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
if (num_to_keep >= rpn_post_nms_top_n)
break;
uint64_t* p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(cudaFree(mask_dev));
}
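// How the mask is consumed: nms_kernel stores, for every box i, one 64-bit
// word per column block whose bit j is set when box i overlaps box
// (col_start * 64 + j) above the threshold. The host loop above then walks the
// boxes in descending score order, keeps a box only if no previously kept box
// has marked it in remv, and ORs the kept box's row of mask_host into remv so
// that everything it suppresses is skipped later.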
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template <typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int* keep,
const int out_size,
Dtype* out,
Dtype* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = 0;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
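// Note: count is rpn_post_nms_top_n while out_size is the number of boxes that
// survived NMS; when out_size < count the remaining output slots are padded by
// cycling through the kept proposals (index % out_size), so the output always
// has exactly rpn_post_nms_top_n rows.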
} // namespace cuda
} // namespace mshadow
namespace mxnet {
namespace op {
template <typename xpu>
class ProposalGPUOp : public Operator {
public:
explicit ProposalGPUOp(ProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext& ctx,
const std::vector<TBlob>& in_data,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& out_data,
const std::vector<TBlob>& aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1)
<< "Sorry, multiple images each device is not implemented.";
Stream<xpu>* s = ctx.get_stream<xpu>();
Shape<4> fg_scores_shape = Shape4(in_data[proposal::kClsProb].shape_[0],
in_data[proposal::kClsProb].shape_[1] / 2,
in_data[proposal::kClsProb].shape_[2],
in_data[proposal::kClsProb].shape_[3]);
real_t* foreground_score_ptr =
in_data[proposal::kClsProb].dptr<real_t>() + fg_scores_shape.Size();
Tensor<xpu, 4> scores = Tensor<xpu, 4>(foreground_score_ptr, fg_scores_shape);
Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s);
int num_anchors = in_data[proposal::kClsProb].shape_[1] / 2;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
// set to -1 for max
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count;
rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.ndim() * param_.scales.ndim());
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor, param_.ratios, param_.scales, &anchors);
// Copy generated anchors to GPU
float* workspace_proposals_ptr = nullptr;
FRCNN_CUDA_CHECK(cudaMalloc(&workspace_proposals_ptr, sizeof(float) * count * 5));
Tensor<xpu, 2> workspace_proposals(workspace_proposals_ptr, Shape2(count, 5));
cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s);
FRCNN_CUDA_CHECK(cudaMemcpyAsync(workspace_proposals.dptr_,
&anchors[0],
sizeof(float) * anchors.size(),
cudaMemcpyHostToDevice,
stream));
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
ProposalGridKernel<<<dimGrid, dimBlock>>>(count,
num_anchors,
height,
width,
param_.feature_stride,
scores.dptr_,
workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaGetLastError());
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(3);
FRCNN_CUDA_CHECK(cudaMemcpyAsync(&cpu_im_info[0],
im_info.dptr_,
sizeof(float) * cpu_im_info.size(),
cudaMemcpyDeviceToHost,
stream));
FRCNN_CUDA_CHECK(cudaStreamSynchronize(stream));
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
IoUPredKernel<<<dimGrid, dimBlock>>>(count,
num_anchors,
height,
width,
real_height,
real_width,
cpu_im_info[0],
cpu_im_info[1],
workspace_proposals.dptr_,
bbox_deltas.dptr_,
workspace_proposals.dptr_);
} else {
BBoxPredKernel<<<dimGrid, dimBlock>>>(count,
num_anchors,
height,
width,
real_height,
real_width,
cpu_im_info[0],
cpu_im_info[1],
workspace_proposals.dptr_,
bbox_deltas.dptr_,
workspace_proposals.dptr_);
}
FRCNN_CUDA_CHECK(cudaGetLastError());
    // filter boxes whose width or height is less than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
FilterBoxKernel<<<dimGrid, dimBlock>>>(
count, param_.rpn_min_size * cpu_im_info[2], workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaGetLastError());
// Copy score to a continuous memory
float* score_ptr = nullptr;
FRCNN_CUDA_CHECK(cudaMalloc(&score_ptr, sizeof(float) * count));
Tensor<xpu, 1> score(score_ptr, Shape1(count));
int* order_ptr = nullptr;
FRCNN_CUDA_CHECK(cudaMalloc(&order_ptr, sizeof(int) * count));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count));
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
CopyScoreKernel<<<dimGrid, dimBlock>>>(
count, workspace_proposals.dptr_, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(cudaGetLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<real_t>());
FRCNN_CUDA_CHECK(cudaGetLastError());
// Reorder proposals according to order
float* workspace_ordered_proposals_ptr = nullptr;
FRCNN_CUDA_CHECK(
cudaMalloc(&workspace_ordered_proposals_ptr, sizeof(float) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr,
Shape2(rpn_pre_nms_top_n, 5));
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
ReorderProposalsKernel<<<dimGrid, dimBlock>>>(rpn_pre_nms_top_n,
workspace_proposals.dptr_,
order.dptr_,
workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaGetLastError());
FRCNN_CUDA_CHECK(cudaFree(workspace_proposals_ptr));
FRCNN_CUDA_CHECK(cudaFree(score_ptr));
FRCNN_CUDA_CHECK(cudaFree(order_ptr));
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(
s, workspace_ordered_proposals, param_.threshold, rpn_post_nms_top_n, &_keep[0], &out_size);
// copy nms result to gpu
int* keep;
FRCNN_CUDA_CHECK(cudaMalloc(&keep, sizeof(int) * _keep.size()));
FRCNN_CUDA_CHECK(cudaMemcpyAsync(
keep, &_keep[0], sizeof(int) * _keep.size(), cudaMemcpyHostToDevice, stream));
// copy results after nms
dimGrid.x = (param_.rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
PrepareOutput<<<dimGrid, dimBlock>>>(param_.rpn_post_nms_top_n,
workspace_ordered_proposals.dptr_,
keep,
out_size,
out.dptr_,
out_score.dptr_);
FRCNN_CUDA_CHECK(cudaGetLastError());
// free temporary memory
FRCNN_CUDA_CHECK(cudaFree(keep));
FRCNN_CUDA_CHECK(cudaFree(workspace_ordered_proposals_ptr));
}
virtual void Backward(const OpContext& ctx,
const std::vector<TBlob>& out_grad,
const std::vector<TBlob>& in_data,
const std::vector<TBlob>& out_data,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& in_grad,
const std::vector<TBlob>& aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu>* s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
ProposalParam param_;
}; // class ProposalGPUOp
template <>
Operator* CreateOp<gpu>(ProposalParam param) {
return new ProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
a8df1f21b3195aa08c7844eb50f58ed8a04a12ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstring>
#include <vector>
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
#include "NvInfer.h"
#include "NvInferPlugin.h"
#include "plugin_utils.h"
#include "PReLUPlugin.h"
#include "spdlog/spdlog.h"
static const char* G_PRELU_TYPE = "PReLU";
static const char* G_PRELU_NAME = "PReLU_TRT"; //plugin_name = plugin_type + plugin_namespace
// CUDA: use 512 threads per block
static const int CUDA_NUM_THREADS = 512;
// CUDA: number of blocks for threads.
inline int CAFFE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
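// Usage sketch (hypothetical kernel, not part of this plugin): the macro
// expands to a grid-stride loop, so a launch with fewer threads than n still
// covers every element, e.g.
//   __global__ void scale(const float* in, float* out, float k, int n) {
//     CUDA_KERNEL_LOOP(i, n) { out[i] = k * in[i]; }
//   }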
// /******** PReLU CUDA function ********/
// CUDA kernel for forward
template <typename Ftype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Ftype* slope_data,
const Ftype* in, Ftype* out,
const Ftype zero,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
if(in[index] > zero) {
out[index] = in[index];
} else {
out[index] = in[index] * slope_data[c];
}
}
}
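// Note on div_factor: with div_factor == 1 the slope index c selects one
// learned slope per channel; passing div_factor == channels instead would
// collapse c to 0, i.e. a single slope shared across all channels. enqueue()
// below always passes div_factor = 1.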
template <typename Ftype>
hipError_t Forward_gpu(const int count, const int channels, const int dim,
const Ftype* mDeviceKernel,
const Ftype* bottom_data, Ftype* top_data,
const Ftype zero,
const int div_factor, const hipStream_t stream) {
hipLaunchKernelGGL(( PReLUForward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream,
count, channels, dim, mDeviceKernel, bottom_data, top_data, zero, div_factor);
hipError_t err = hipGetLastError();
return err;
}
PReLUPlugin::PReLUPlugin(const nvinfer1::Weights *weights, int nbWeights) {
mWeights = weights[0];
mWeights.values = malloc(mWeights.count * type2size(mWeights.type));
memcpy(const_cast<void *>(mWeights.values), weights[0].values, mWeights.count * type2size(mWeights.type));
}
// create the plugin at runtime from a byte stream
PReLUPlugin::PReLUPlugin(const void *data, size_t length) {
const char *d = static_cast<const char *>(data), *a = d;
read<int>(d, mNbInputChannels);
read<int>(d, mNbInputHeight);
read<int>(d, mNbInputWidth);
read<nvinfer1::DataType>(d, mDataType);
read<int64_t>(d, mWeights.count);
read<nvinfer1::DataType>(d, mWeights.type);
mWeights.values = nullptr;
mWeights.values = malloc(mWeights.count * type2size(mWeights.type));
memcpy(const_cast<void *>(mWeights.values), d, mWeights.count * type2size(mWeights.type));
d = d + mWeights.count * type2size(mWeights.type);
ASSERT(d == a + length);
}
size_t PReLUPlugin::getSerializationSize() const {
return sizeof(mNbInputChannels) + sizeof(mNbInputWidth) + sizeof(mNbInputHeight) + sizeof(mDataType) +
sizeof(mWeights.count) + sizeof(mWeights.type) + mWeights.count * type2size(mWeights.type);
}
void PReLUPlugin::serialize(void *buffer) const {
char *d = static_cast<char *>(buffer), *a = d;
write(d, mNbInputChannels);
write(d, mNbInputHeight);
write(d, mNbInputWidth);
write(d, mDataType);
write(d, mWeights.count);
write(d, mWeights.type);
convertAndCopyToBuffer(d, mWeights, mWeights.type);
ASSERT(d == a + getSerializationSize());
}
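// Serialization layout: the write order above (nbInputChannels, nbInputHeight,
// nbInputWidth, mDataType, mWeights.count, mWeights.type, raw weight bytes)
// must stay in sync with the read order in the deserializing constructor and
// with getSerializationSize().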
PReLUPlugin::~PReLUPlugin() {
if (mWeights.values)
{
free(const_cast<void *>(mWeights.values));
mWeights.values = nullptr;
}
if (mDeviceKernel)
{
hipFree(mDeviceKernel);
mDeviceKernel = nullptr;
}
}
int PReLUPlugin::getNbOutputs() const {
return 1;
}
nvinfer1::Dims PReLUPlugin::getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) {
if(index == 0) {
return nvinfer1::Dims3(inputs[0].d[0],inputs[0].d[1],inputs[0].d[2]);
} // else if(index == n) {
// for other outputs if exists.
// }
else {
ASSERT(false);
}
}
bool PReLUPlugin::supportsFormat(nvinfer1::DataType type, nvinfer1::PluginFormat format) const {
    return (type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
        && format == nvinfer1::PluginFormat::kNCHW;
}
void PReLUPlugin::configureWithFormat(const nvinfer1::Dims* inputDims, int nbInputs,
const nvinfer1::Dims* outputDims, int nbOutputs,
nvinfer1::DataType type, nvinfer1::PluginFormat format,
int maxBatchSize) {
    ASSERT((type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
        && format == nvinfer1::PluginFormat::kNCHW);
mNbInputChannels = inputDims[0].d[0];
mNbInputHeight = inputDims[0].d[1];
mNbInputWidth = inputDims[0].d[2];
mDataType = type;
}
int PReLUPlugin::initialize() {
convertAndCopyToDeivce(mDeviceKernel, mWeights, mDataType);
return 0;
}
void PReLUPlugin::terminate() {
if (mWeights.values)
{
free(const_cast<void *>(mWeights.values));
mWeights.values = nullptr;
}
if (mDeviceKernel)
{
hipFree(mDeviceKernel);
mDeviceKernel = nullptr;
}
}
size_t PReLUPlugin::getWorkspaceSize(int maxBatchSize) const
{
return 0;
}
int PReLUPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream)
{
const int count = batchSize * mNbInputChannels * mNbInputWidth * mNbInputHeight;
const int channels = mNbInputChannels;
const int dim = mNbInputWidth * mNbInputHeight;
const int div_factor = 1;
if (mDataType == nvinfer1::DataType::kFLOAT)
{
const float zerof{0.0f};
CUDA_CHECK(Forward_gpu(count, channels, dim,
reinterpret_cast<const float *>(mDeviceKernel),
reinterpret_cast<const float *>(inputs[0]),
reinterpret_cast<float *>(outputs[0]),
zerof,
div_factor,
stream));
} else {
const __half zeroh = __half(0.0f);
CUDA_CHECK(Forward_gpu(count, channels, dim,
reinterpret_cast<const __half *>(mDeviceKernel),
reinterpret_cast<const __half *>(inputs[0]),
reinterpret_cast<__half *>(outputs[0]),
zeroh,
div_factor,
stream));
}
return 0;
}
const char *PReLUPlugin::getPluginType() const {
return G_PRELU_TYPE;
}
const char *PReLUPlugin::getPluginVersion() const {
return G_PLUGIN_VERSION;
}
void PReLUPlugin::destroy() {
delete this;
}
nvinfer1::IPluginV2* PReLUPlugin::clone() const {
return new PReLUPlugin(&mWeights, 1);
}
const char* PReLUPlugin::getPluginNamespace() const {
return G_PLUGIN_NAMESPACE;
}
PReLUPluginCreator::PReLUPluginCreator() {
mPluginAttributes.emplace_back(nvinfer1::PluginField("weights", nullptr, nvinfer1::PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(nvinfer1::PluginField("nbWeight", nullptr, nvinfer1::PluginFieldType::kINT32, 1));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
// return PRELU_PLUGIN_TYPE + PRELU_PLUGIN_NAMESPACE
const char* PReLUPluginCreator::getPluginName() const {
// std::string plugin_type{G_PRELU_TYPE};
// std::string plugin_namespace{G_PLUGIN_NAMESPACE};
// return (plugin_type+plugin_namespace).c_str();
return G_PRELU_NAME;
}
const char* PReLUPluginCreator::getPluginVersion() const {
return G_PLUGIN_VERSION;
}
const nvinfer1::PluginFieldCollection* PReLUPluginCreator::getFieldNames() {
return &mFC;
}
nvinfer1::IPluginV2* PReLUPluginCreator::createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) {
int nbWeights;
std::vector<float> weightValues;
const nvinfer1::PluginField* fields = fc->fields;
for (int i=0; i<fc->nbFields; i++) {
const char* attrName = fields[i].name;
if(strcmp(attrName, "nbWeights")) {
ASSERT(fields[i].type == nvinfer1::PluginFieldType::kINT32);
nbWeights = *(static_cast<const int*>(fields[i].data));
}
if(strcmp(attrName, "weights")) {
ASSERT(fields[i].type == nvinfer1::PluginFieldType::kFLOAT32);
weightValues.reserve(fields[i].length);
const auto* w = static_cast<const float*>(fields[i].data);
            for (int j = 0; j < fields[i].length; j++)
{
weightValues.push_back(*w);
w++;
}
}
}
nvinfer1::Weights weights{nvinfer1::DataType::kFLOAT, weightValues.data(), (int64_t)weightValues.size()};
return new PReLUPlugin(&weights,nbWeights);
}
// deserialization plugin implementation
nvinfer1::IPluginV2* PReLUPluginCreator::deserializePlugin(const char *layerName, const void *serialData, size_t serialLength) {
return new PReLUPlugin(serialData, serialLength);
}
const char* PReLUPluginCreator::getPluginNamespace() const {
return G_PLUGIN_NAMESPACE;
}
REGISTER_TENSORRT_PLUGIN(PReLUPluginCreator); // DO NOT FORGET THIS
//
| a8df1f21b3195aa08c7844eb50f58ed8a04a12ce.cu | #include <cstring>
#include <vector>
#include "cuda_runtime.h"
#include "cuda_fp16.h"
#include "NvInfer.h"
#include "NvInferPlugin.h"
#include "plugin_utils.h"
#include "PReLUPlugin.h"
#include "spdlog/spdlog.h"
static const char* G_PRELU_TYPE = "PReLU";
static const char* G_PRELU_NAME = "PReLU_TRT"; //plugin_name = plugin_type + plugin_namespace
// CUDA: use 512 threads per block
static const int CUDA_NUM_THREADS = 512;
// CUDA: number of blocks for threads.
inline int CAFFE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// /******** PReLU CUDA function ********/
// CUDA kernel for forward
template <typename Ftype>
__global__ void PReLUForward(const int n, const int channels, const int dim,
const Ftype* slope_data,
const Ftype* in, Ftype* out,
const Ftype zero,
const int div_factor) {
CUDA_KERNEL_LOOP(index, n) {
int c = (index / dim) % channels / div_factor;
if(in[index] > zero) {
out[index] = in[index];
} else {
out[index] = in[index] * slope_data[c];
}
}
}
template <typename Ftype>
cudaError_t Forward_gpu(const int count, const int channels, const int dim,
const Ftype* mDeviceKernel,
const Ftype* bottom_data, Ftype* top_data,
const Ftype zero,
const int div_factor, const cudaStream_t stream) {
PReLUForward<<<CAFFE_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>
(count, channels, dim, mDeviceKernel, bottom_data, top_data, zero, div_factor);
cudaError_t err = cudaGetLastError();
return err;
}
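// Note: the launch above is asynchronous with respect to the host; it is
// enqueued on `stream`, and cudaGetLastError only reports launch-time errors
// (e.g. a bad configuration), not failures that occur while the kernel runs.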
PReLUPlugin::PReLUPlugin(const nvinfer1::Weights *weights, int nbWeights) {
mWeights = weights[0];
mWeights.values = malloc(mWeights.count * type2size(mWeights.type));
memcpy(const_cast<void *>(mWeights.values), weights[0].values, mWeights.count * type2size(mWeights.type));
}
// create the plugin at runtime from a byte stream
PReLUPlugin::PReLUPlugin(const void *data, size_t length) {
const char *d = static_cast<const char *>(data), *a = d;
read<int>(d, mNbInputChannels);
read<int>(d, mNbInputHeight);
read<int>(d, mNbInputWidth);
read<nvinfer1::DataType>(d, mDataType);
read<int64_t>(d, mWeights.count);
read<nvinfer1::DataType>(d, mWeights.type);
mWeights.values = nullptr;
mWeights.values = malloc(mWeights.count * type2size(mWeights.type));
memcpy(const_cast<void *>(mWeights.values), d, mWeights.count * type2size(mWeights.type));
d = d + mWeights.count * type2size(mWeights.type);
ASSERT(d == a + length);
}
size_t PReLUPlugin::getSerializationSize() const {
return sizeof(mNbInputChannels) + sizeof(mNbInputWidth) + sizeof(mNbInputHeight) + sizeof(mDataType) +
sizeof(mWeights.count) + sizeof(mWeights.type) + mWeights.count * type2size(mWeights.type);
}
void PReLUPlugin::serialize(void *buffer) const {
char *d = static_cast<char *>(buffer), *a = d;
write(d, mNbInputChannels);
write(d, mNbInputHeight);
write(d, mNbInputWidth);
write(d, mDataType);
write(d, mWeights.count);
write(d, mWeights.type);
convertAndCopyToBuffer(d, mWeights, mWeights.type);
ASSERT(d == a + getSerializationSize());
}
PReLUPlugin::~PReLUPlugin() {
if (mWeights.values)
{
free(const_cast<void *>(mWeights.values));
mWeights.values = nullptr;
}
if (mDeviceKernel)
{
cudaFree(mDeviceKernel);
mDeviceKernel = nullptr;
}
}
int PReLUPlugin::getNbOutputs() const {
return 1;
}
nvinfer1::Dims PReLUPlugin::getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) {
if(index == 0) {
return nvinfer1::Dims3(inputs[0].d[0],inputs[0].d[1],inputs[0].d[2]);
} // else if(index == n) {
// for other outputs if exists.
// }
else {
ASSERT(false);
}
}
bool PReLUPlugin::supportsFormat(nvinfer1::DataType type, nvinfer1::PluginFormat format) const {
    return (type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
        && format == nvinfer1::PluginFormat::kNCHW;
}
void PReLUPlugin::configureWithFormat(const nvinfer1::Dims* inputDims, int nbInputs,
const nvinfer1::Dims* outputDims, int nbOutputs,
nvinfer1::DataType type, nvinfer1::PluginFormat format,
int maxBatchSize) {
    ASSERT((type == nvinfer1::DataType::kFLOAT || type == nvinfer1::DataType::kHALF)
        && format == nvinfer1::PluginFormat::kNCHW);
mNbInputChannels = inputDims[0].d[0];
mNbInputHeight = inputDims[0].d[1];
mNbInputWidth = inputDims[0].d[2];
mDataType = type;
}
int PReLUPlugin::initialize() {
convertAndCopyToDeivce(mDeviceKernel, mWeights, mDataType);
return 0;
}
void PReLUPlugin::terminate() {
if (mWeights.values)
{
free(const_cast<void *>(mWeights.values));
mWeights.values = nullptr;
}
if (mDeviceKernel)
{
cudaFree(mDeviceKernel);
mDeviceKernel = nullptr;
}
}
size_t PReLUPlugin::getWorkspaceSize(int maxBatchSize) const
{
return 0;
}
int PReLUPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream)
{
const int count = batchSize * mNbInputChannels * mNbInputWidth * mNbInputHeight;
const int channels = mNbInputChannels;
const int dim = mNbInputWidth * mNbInputHeight;
const int div_factor = 1;
if (mDataType == nvinfer1::DataType::kFLOAT)
{
const float zerof{0.0f};
CUDA_CHECK(Forward_gpu(count, channels, dim,
reinterpret_cast<const float *>(mDeviceKernel),
reinterpret_cast<const float *>(inputs[0]),
reinterpret_cast<float *>(outputs[0]),
zerof,
div_factor,
stream));
} else {
const __half zeroh = __half(0.0f);
CUDA_CHECK(Forward_gpu(count, channels, dim,
reinterpret_cast<const __half *>(mDeviceKernel),
reinterpret_cast<const __half *>(inputs[0]),
reinterpret_cast<__half *>(outputs[0]),
zeroh,
div_factor,
stream));
}
return 0;
}
const char *PReLUPlugin::getPluginType() const {
return G_PRELU_TYPE;
}
const char *PReLUPlugin::getPluginVersion() const {
return G_PLUGIN_VERSION;
}
void PReLUPlugin::destroy() {
delete this;
}
nvinfer1::IPluginV2* PReLUPlugin::clone() const {
return new PReLUPlugin(&mWeights, 1);
}
const char* PReLUPlugin::getPluginNamespace() const {
return G_PLUGIN_NAMESPACE;
}
PReLUPluginCreator::PReLUPluginCreator() {
mPluginAttributes.emplace_back(nvinfer1::PluginField("weights", nullptr, nvinfer1::PluginFieldType::kFLOAT32, 1));
mPluginAttributes.emplace_back(nvinfer1::PluginField("nbWeight", nullptr, nvinfer1::PluginFieldType::kINT32, 1));
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
// return PRELU_PLUGIN_TYPE + PRELU_PLUGIN_NAMESPACE
const char* PReLUPluginCreator::getPluginName() const {
// std::string plugin_type{G_PRELU_TYPE};
// std::string plugin_namespace{G_PLUGIN_NAMESPACE};
// return (plugin_type+plugin_namespace).c_str();
return G_PRELU_NAME;
}
const char* PReLUPluginCreator::getPluginVersion() const {
return G_PLUGIN_VERSION;
}
const nvinfer1::PluginFieldCollection* PReLUPluginCreator::getFieldNames() {
return &mFC;
}
nvinfer1::IPluginV2* PReLUPluginCreator::createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) {
int nbWeights;
std::vector<float> weightValues;
const nvinfer1::PluginField* fields = fc->fields;
for (int i=0; i<fc->nbFields; i++) {
const char* attrName = fields[i].name;
if(strcmp(attrName, "nbWeights")) {
ASSERT(fields[i].type == nvinfer1::PluginFieldType::kINT32);
nbWeights = *(static_cast<const int*>(fields[i].data));
}
if(strcmp(attrName, "weights")) {
ASSERT(fields[i].type == nvinfer1::PluginFieldType::kFLOAT32);
weightValues.reserve(fields[i].length);
const auto* w = static_cast<const float*>(fields[i].data);
            for (int j = 0; j < fields[i].length; j++)
{
weightValues.push_back(*w);
w++;
}
}
}
nvinfer1::Weights weights{nvinfer1::DataType::kFLOAT, weightValues.data(), (int64_t)weightValues.size()};
return new PReLUPlugin(&weights,nbWeights);
}
// deserialization plugin implementation
nvinfer1::IPluginV2* PReLUPluginCreator::deserializePlugin(const char *layerName, const void *serialData, size_t serialLength) {
return new PReLUPlugin(serialData, serialLength);
}
const char* PReLUPluginCreator::getPluginNamespace() const {
return G_PLUGIN_NAMESPACE;
}
REGISTER_TENSORRT_PLUGIN(PReLUPluginCreator); // DO NOT FORGET THIS
// Don't forget this
|
68b0d44b2e8fc9585919a9e368252171bb1f99f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <cassert>
#include <iostream>
using namespace std;
const int Tile_size = 2;
// Compute C = A * B
//*************************************************************
//Kernel for shared memory/ Tiled execution
__global__ void matrixMul(double *A, double *X, double *B, long rows, long cols){
int tid= threadIdx.x + blockIdx.x * blockDim.x;
double sum= 0;
if(tid < rows){
for(int i=0; i < cols; i++){
sum += X[i] * A[(i * rows) + tid];
}
B[tid]=sum;
}
}
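// Note: each thread computes one element of B = A * X for its row `tid`; the
// indexing A[(i * rows) + tid] assumes A is stored column-major, i.e. element
// (row, col) lives at col * rows + row.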
//*************************************************************
void Print_Mat(int Row,int Col,double * Mat){//Function To print the Matrix
for(int i=0; i < Row; ++i){
for(int j=0; j < Col; ++j){
cout << Mat[i * Row + j] << " ";
}
cout << endl;
}
}//Function close
//*************************************************************
//Normal CPU Matrix Multiplication
void matMultiplyOnHost(float * A, float * B, float * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns){
for (int i=0; i < numARows; i ++){
        for (int j = 0; j < numBColumns; j++){
C[i*numCColumns + j ] = 0.0;
            for (int k = 0; k < numAColumns; k++){
C[i*numCColumns + j ] += A[i*numAColumns + k] * B [k*numBColumns + j];
}
}
}
}
//*************************************************************
extern "C++" void generate_b_gpu(double *hostA, double *hostX, double *hostB, long cols, long rows) {
double *deviceA;
double *deviceB;
double *deviceX;
// Allocating GPU memory
assert(hipSuccess == hipMalloc((void **)&deviceA, sizeof(double)*cols*rows));
assert(hipSuccess == hipMalloc((void **)&deviceB, sizeof(double)*rows));
assert(hipSuccess == hipMalloc((void **)&deviceX, sizeof(double)*rows));
// Copy memory to the GPU
assert(hipSuccess == hipMemcpy(deviceA, hostA, sizeof(double)*cols*rows, hipMemcpyHostToDevice));
    assert(hipSuccess == hipMemcpy(deviceX, hostX, sizeof(double)*rows, hipMemcpyHostToDevice));
// Initialize the grid and block dimensions
    dim3 dimGrid((rows + Tile_size - 1) / Tile_size, 1, 1);//Number of blocks required to cover all rows
    dim3 dimBlock(Tile_size, 1, 1);//Number of threads in each block
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMul), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceX, deviceB, rows, cols);
hipError_t err1 = hipPeekAtLastError();//To capture last error in function call
hipDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
    assert(hipSuccess == hipMemcpy(hostB, deviceB, sizeof(double)*rows, hipMemcpyDeviceToHost));
cout << "GPU A" << endl;
Print_Mat(rows, cols, hostA);
//matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
//printf("\nMatrix C From Host\n");
//Print_Mat(numCRows,numCColumns,hostComputedC);//Function Call
//printf("\n Number of Blocks Created:%d \n",((1/Tile_size) + 1)*((1/Tile_size) + 1));
//printf("\n Number of Threads Per Block: %d \n",(Tile_size*Tile_size));
// Free the GPU memory
assert(hipSuccess == hipFree(deviceA));
assert(hipSuccess == hipFree(deviceB));
assert(hipSuccess == hipFree(deviceX));
//Free the Pointer Memory
//free(hostA);
//free(hostB);
//free(hostX);
} | 68b0d44b2e8fc9585919a9e368252171bb1f99f2.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <cassert>
#include <iostream>
using namespace std;
const int Tile_size = 2;
// Compute C = A * B
//*************************************************************
//Kernel for shared memory/ Tiled execution
__global__ void matrixMul(double *A, double *X, double *B, long rows, long cols){
int tid= threadIdx.x + blockIdx.x * blockDim.x;
double sum= 0;
if(tid < rows){
for(int i=0; i < cols; i++){
sum += X[i] * A[(i * rows) + tid];
}
B[tid]=sum;
}
}
//*************************************************************
void Print_Mat(int Row,int Col,double * Mat){//Function To print the Matrix
for(int i=0; i < Row; ++i){
for(int j=0; j < Col; ++j){
cout << Mat[i * Row + j] << " ";
}
cout << endl;
}
}//Function close
//*************************************************************
//Normal CPU Matrix Multiplication
void matMultiplyOnHost(float * A, float * B, float * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns){
for (int i=0; i < numARows; i ++){
        for (int j = 0; j < numBColumns; j++){
C[i*numCColumns + j ] = 0.0;
            for (int k = 0; k < numAColumns; k++){
C[i*numCColumns + j ] += A[i*numAColumns + k] * B [k*numBColumns + j];
}
}
}
}
//*************************************************************
extern "C++" void generate_b_gpu(double *hostA, double *hostX, double *hostB, long cols, long rows) {
double *deviceA;
double *deviceB;
double *deviceX;
// Allocating GPU memory
assert(cudaSuccess == cudaMalloc((void **)&deviceA, sizeof(double)*cols*rows));
assert(cudaSuccess == cudaMalloc((void **)&deviceB, sizeof(double)*rows));
assert(cudaSuccess == cudaMalloc((void **)&deviceX, sizeof(double)*rows));
// Copy memory to the GPU
assert(cudaSuccess == cudaMemcpy(deviceA, hostA, sizeof(double)*cols*rows, cudaMemcpyHostToDevice));
    assert(cudaSuccess == cudaMemcpy(deviceX, hostX, sizeof(double)*rows, cudaMemcpyHostToDevice));
// Initialize the grid and block dimensions
    dim3 dimGrid((rows + Tile_size - 1) / Tile_size, 1, 1);//Number of blocks required to cover all rows
    dim3 dimBlock(Tile_size, 1, 1);//Number of threads in each block
//@@ Launch the GPU Kernel here
matrixMul<<<dimGrid, dimBlock>>>(deviceA, deviceX, deviceB, rows, cols);
cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
cudaDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
    assert(cudaSuccess == cudaMemcpy(hostB, deviceB, sizeof(double)*rows, cudaMemcpyDeviceToHost));
cout << "GPU A" << endl;
Print_Mat(rows, cols, hostA);
//matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
//printf("\nMatrix C From Host\n");
//Print_Mat(numCRows,numCColumns,hostComputedC);//Function Call
//printf("\n Number of Blocks Created:%d \n",((1/Tile_size) + 1)*((1/Tile_size) + 1));
//printf("\n Number of Threads Per Block: %d \n",(Tile_size*Tile_size));
// Free the GPU memory
assert(cudaSuccess == cudaFree(deviceA));
assert(cudaSuccess == cudaFree(deviceB));
assert(cudaSuccess == cudaFree(deviceX));
//Free the Pointer Memory
//free(hostA);
//free(hostB);
//free(hostX);
} |
7f7a5923140847b7d289c9f9d9eb0c61c0dfdd5b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <GL/gl.h>
#include <GL/glut.h>
#include <math.h>
#include <stdbool.h>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <hip/hip_runtime.h>
#define X 0
#define Y 1
#define Z 2
#define nump 12
#define NUM_POINTS (nump * nump * nump)
#define PI 3.141592653589793
#define R 0.2
#define dev 3
#define INIT_WIDTH 750
#define INIT_HEIGHT 750
#define GRAV 10000
#define dist 2
#define vision 20
#define Grid_x nump
#define Grid_y nump
#define Grid_z 1
#define Block_x nump
#define Block_y 1
#define Block_z 1
unsigned int num_points = (dev + 1) * (dev + 1);
unsigned int window_width = 150;
unsigned int window_height = 150;
double init_left = -10000;
double init_right = 10000;
double init_bottom = -10000;
double init_top = 10000;
double left, right, bottom, top;
float h_point[NUM_POINTS][3];
float v_point[NUM_POINTS][3];
float anim_time = 0.0f;
float anim_dt = 0.00000001f;
double phi = 30.0;
double theta = 30.0;
float light_pos[4];
int mouse_old_x, mouse_old_y;
bool motion_p;
double eye[3];
double center[3] = {0.0, 0.0, 0.0};
double up[3];
double ** point;
float (*d_point)[3];
float (*dv_point)[3];
__global__ void grav_v(float (*pos)[3], float(*vec)[3] , float time, float dt);
__global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt);
double dot(double vec0[], double vec1[])
{
return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]);
}
void cross(double vec0[], double vec1[], double vec2[])
{
vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y];
vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z];
vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X];
}
void normVec(double vec[])
{
double norm;
norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]);
vec[X] /= norm;
vec[Y] /= norm;
vec[Z] /= norm;
}
void normal(double p0[], double p1[], double p2[], double normal[])
{
unsigned int i;
double v0[3], v1[3];
for (i = 0; i < 3; i++) {
v0[i] = p2[i] - p1[i];
v1[i] = p0[i] - p1[i];
}
cross(v0, v1, normal);
normVec(normal);
}
__global__ void grav_v(float (*pos)[3], float(*vec)[3] , float time, float dt)
{
double xn,yn,zn,vx,vy,vz,dis,sq;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ;
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
for (int i = 0 ; i < NUM_POINTS; i++)
{
sq = pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2);
dis = sqrt(sq);
if (dis > dist * R)
{
vx = vx + (pos[i][0]-xn)/dis/sq*R*R*R*GRAV;
vy = vy + (pos[i][1]-yn)/dis/sq*R*R*R*GRAV;
vz = vz + (pos[i][2]-zn)/dis/sq*R*R*R*GRAV;
}
}
vec[index][0] = vx;
vec[index][1] = vy;
vec[index][2] = vz;
}
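// Note: grav_v is an O(N^2) all-pairs update; each thread accumulates the
// gravitational pull on its own particle, and the `dis > dist * R` test both
// skips the self-interaction and acts as a crude softening cutoff for
// near-contact pairs.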
__global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt)
{
double xn,yn,zn,vx,vy,vz;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ;
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
pos[index][0] = xn + vx * dt;
pos[index][1] = yn + vy * dt;
pos[index][2] = zn + vz * dt;
}
void launchGPUKernel(unsigned int num_particles, float (*pos)[3], float (*vec)[3] , float time, float dt)
{
dim3 grid(Grid_x,Grid_y,Grid_z);
dim3 block(Block_x,Block_y,Block_z);
hipLaunchKernelGGL(( grav_v), dim3(grid) , dim3(block), 0, 0, pos, vec, time, dt);
hipLaunchKernelGGL(( grav_p), dim3(grid) , dim3(block), 0, 0, pos, vec, time, dt);
}
//
void setInitialPosition(void)
{
unsigned int i, j, k ;
unsigned int count = 0;
for (i = 0; i < nump; i++) {
for (j = 0; j < nump; j++) {
for (k = 0; k < nump; k++) {
h_point[count][0] = -(nump * R) + (double)i * 2 * R ;
h_point[count][1] = -(nump * R) + (double)j * 2 * R ;
h_point[count][2] = -(nump * R) + (double)k * 2 * R ;
count++;
}
}
}
for (i = 0; i < NUM_POINTS; i++) {
v_point[i][0] = 0;
v_point[i][1] = 0;
v_point[i][2] = 0;
}
    /* Allocate device memory */
checkCudaErrors(hipMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(hipMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice));
}
void runGPUKernel(void)
{
launchGPUKernel(NUM_POINTS, d_point, dv_point , anim_time, anim_dt);
checkCudaErrors(hipMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost));
anim_time += anim_dt;
}
void defineViewMatrix(double phi, double theta)
{
unsigned int i;
double c, s, xy_dist;
double x_axis[3], y_axis[3], z_axis[3];
    // Set the viewpoint.
eye[Z] = sin(theta * PI / 180.0);
xy_dist = cos(theta * PI / 180.0);
c = cos(phi * PI / 180.0);
s = sin(phi * PI / 180.0);
eye[X] = xy_dist * c;
eye[Y] = xy_dist * s;
up[X] = - c * eye[Z];
up[Y] = - s * eye[Z];
up[Z] = s * eye[Y] + c * eye[X];
normVec(up);
    // Define a coordinate system with the viewpoint at the origin.
for (i = 0; i < 3; i++)
{
z_axis[i] = eye[i] - center[i];
}
normVec(z_axis);
cross(up, z_axis, x_axis);
normVec(x_axis);
cross(z_axis, x_axis, y_axis);
gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]);
}
void display(void)
{
double nrml_vec[3];
light_pos[0] = (float)eye[X];
light_pos[1] = (float)eye[Y];
light_pos[2] = (float)eye[Z];
light_pos[3] = 1.0f;
runGPUKernel();
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_pos);
glEnable(GL_LIGHTING);
glMatrixMode(GL_PROJECTION);
glFrustum(-1000000, 1000000, -1000000, 1000000, -1000000, 1000000);
glLoadIdentity();
glOrtho(-vision, vision, -vision, vision, -1000, 1000);
glViewport(0, 0, window_width, window_height);
defineViewMatrix(phi, theta);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBegin(GL_QUADS);
#pragma omp parallel for
for (int k = 0 ; k < NUM_POINTS ; k++)
{
for (int i = 0 ; i < dev + 1 ; i ++)
{
for (int j = 0 ; j < 2 * dev + 1 ; j++)
{
normal(point[i * (dev-1) + j],point[(i + 1) * (dev-1) + j + 1],point[(i+1) * (dev-1) + j],nrml_vec);
glNormal3dv(nrml_vec);
glVertex3d(point[i * (dev-1) + j][X] + h_point[k][X], point[i * (dev-1) + j][Y] + h_point[k][Y], point[i * (dev-1) + j][Z] + h_point[k][Z]);
glVertex3d(point[(i + 1) * (dev-1) + j][X] + h_point[k][X],point[(i + 1) * (dev-1) + j][Y] + h_point[k][Y],point[(i + 1) * (dev-1) + j][Z] + h_point[k][Z]);
glVertex3d(point[(i + 1) * (dev-1) + j + 1][X] + h_point[k][X], point[(i + 1) * (dev-1) + j + 1][Y] + h_point[k][Y], point[(i + 1) * (dev-1) + j + 1][Z] + h_point[k][Z]);
glVertex3d(point[i * (dev-1) + j + 1][X] + h_point[k][X],point[i * (dev-1) + j + 1][Y] + h_point[k][Y],point[i * (dev-1) + j + 1][Z] + h_point[k][Z]);
}
}
}
glEnd();
glutSwapBuffers();
glutPostRedisplay();
}
void resize (int width, int height)
{
window_width = width;
window_height = height;
}
bool initGL(void)
{
glClearColor(0.0f, 0.0f , 0.0f, 0.5f);
glEnable(GL_DEPTH_TEST);
glClearDepth(1.0);
glDepthFunc(GL_LESS);
glEnable(GL_LIGHT0);
return true;
}
int main(int argc, char** argv)
{
double yangle,zangle;
double r;
point = (double **)malloc(sizeof(double *) * num_points);
for (int i = 0 ; i < num_points ; i++)
{
point[i] = (double *)malloc(sizeof(double) * 3);
}
for (int i = 0 ; i < dev + 1; i ++)
{
zangle = i * PI / dev;
r=R * sin(zangle);
for (int j = 0 ; j < dev + 1; j++)
{
yangle=j * PI * 2 / dev;
point[i * dev + j][X] = r * sin(yangle);
point[i * dev + j][Y] = r * cos(yangle);
point[i * dev + j][Z] = R * cos(zangle);
}
}
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(INIT_WIDTH, INIT_HEIGHT);
glutCreateWindow("3D CUDA Simulation");
glutDisplayFunc(display);
glutReshapeFunc(resize);
setInitialPosition();
if (!initGL())
return 1;
glutMainLoop();
hipFree(d_point);
hipFree(dv_point);
hipDeviceReset();
for (int i = 0 ; i < num_points ; i++)
{
free (point[i]);
}
free (point);
return 0;
}
| 7f7a5923140847b7d289c9f9d9eb0c61c0dfdd5b.cu | #include <stdio.h>
#include <stdlib.h>
#include <GL/gl.h>
#include <GL/glut.h>
#include <math.h>
#include <stdbool.h>
#include <omp.h>
#include <cuda.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <cuda_runtime.h>
#define X 0
#define Y 1
#define Z 2
#define nump 12
#define NUM_POINTS (nump * nump * nump)
#define PI 3.141592653589793
#define R 0.2
#define dev 3
#define INIT_WIDTH 750
#define INIT_HEIGHT 750
#define GRAV 10000
#define dist 2
#define vision 20
#define Grid_x nump
#define Grid_y nump
#define Grid_z 1
#define Block_x nump
#define Block_y 1
#define Block_z 1
unsigned int num_points = (dev + 1) * (dev + 1);
unsigned int window_width = 150;
unsigned int window_height = 150;
double init_left = -10000;
double init_right = 10000;
double init_bottom = -10000;
double init_top = 10000;
double left, right, bottom, top;
float h_point[NUM_POINTS][3];
float v_point[NUM_POINTS][3];
float anim_time = 0.0f;
float anim_dt = 0.00000001f;
double phi = 30.0;
double theta = 30.0;
float light_pos[4];
int mouse_old_x, mouse_old_y;
bool motion_p;
double eye[3];
double center[3] = {0.0, 0.0, 0.0};
double up[3];
double ** point;
float (*d_point)[3];
float (*dv_point)[3];
__global__ void grav_v(float (*pos)[3], float(*vec)[3] , float time, float dt);
__global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt);
double dot(double vec0[], double vec1[])
{
return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]);
}
void cross(double vec0[], double vec1[], double vec2[])
{
vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y];
vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z];
vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X];
}
void normVec(double vec[])
{
double norm;
norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]);
vec[X] /= norm;
vec[Y] /= norm;
vec[Z] /= norm;
}
void normal(double p0[], double p1[], double p2[], double normal[])
{
unsigned int i;
double v0[3], v1[3];
for (i = 0; i < 3; i++) {
v0[i] = p2[i] - p1[i];
v1[i] = p0[i] - p1[i];
}
cross(v0, v1, normal);
normVec(normal);
}
__global__ void grav_v(float (*pos)[3], float(*vec)[3] , float time, float dt)
{
double xn,yn,zn,vx,vy,vz,dis,sq;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ;
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
for (int i = 0 ; i < NUM_POINTS; i++)
{
sq = pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2);
dis = sqrt(sq);
if (dis > dist * R)
{
vx = vx + (pos[i][0]-xn)/dis/sq*R*R*R*GRAV;
vy = vy + (pos[i][1]-yn)/dis/sq*R*R*R*GRAV;
vz = vz + (pos[i][2]-zn)/dis/sq*R*R*R*GRAV;
}
}
vec[index][0] = vx;
vec[index][1] = vy;
vec[index][2] = vz;
}
__global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt)
{
double xn,yn,zn,vx,vy,vz;
unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x;
unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y;
unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z;
unsigned int index = ( blockDim.x * (Grid_x - 1) + blockDim.x ) * ( blockDim.y * (Grid_y - 1) + blockDim.y ) * thread_idz + ( blockDim.x * (Grid_x - 1) + blockDim.x ) * thread_idy + thread_idx ;
xn = pos[index][0];
yn = pos[index][1];
zn = pos[index][2];
vx = vec[index][0];
vy = vec[index][1];
vz = vec[index][2];
pos[index][0] = xn + vx * dt;
pos[index][1] = yn + vy * dt;
pos[index][2] = zn + vz * dt;
}
void launchGPUKernel(unsigned int num_particles, float (*pos)[3], float (*vec)[3] , float time, float dt)
{
dim3 grid(Grid_x,Grid_y,Grid_z);
dim3 block(Block_x,Block_y,Block_z);
grav_v<<<grid , block>>>(pos, vec, time, dt);
grav_p<<<grid , block>>>(pos, vec, time, dt);
}
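// Note: splitting the update into grav_v followed by grav_p (two launches on
// the default stream, which execute in order) guarantees that every velocity
// is computed from the old positions before any position is advanced.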
// Place particles at their initial positions.
void setInitialPosition(void)
{
unsigned int i, j, k ;
unsigned int count = 0;
for (i = 0; i < nump; i++) {
for (j = 0; j < nump; j++) {
for (k = 0; k < nump; k++) {
h_point[count][0] = -(nump * R) + (double)i * 2 * R ;
h_point[count][1] = -(nump * R) + (double)j * 2 * R ;
h_point[count][2] = -(nump * R) + (double)k * 2 * R ;
count++;
}
}
}
for (i = 0; i < NUM_POINTS; i++) {
v_point[i][0] = 0;
v_point[i][1] = 0;
v_point[i][2] = 0;
}
    /* Allocate device memory */
checkCudaErrors(cudaMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float)));
checkCudaErrors(cudaMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice));
}
void runGPUKernel(void)
{
launchGPUKernel(NUM_POINTS, d_point, dv_point , anim_time, anim_dt);
checkCudaErrors(cudaMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost));
anim_time += anim_dt;
}
void defineViewMatrix(double phi, double theta)
{
unsigned int i;
double c, s, xy_dist;
double x_axis[3], y_axis[3], z_axis[3];
    // Set the viewpoint.
eye[Z] = sin(theta * PI / 180.0);
xy_dist = cos(theta * PI / 180.0);
c = cos(phi * PI / 180.0);
s = sin(phi * PI / 180.0);
eye[X] = xy_dist * c;
eye[Y] = xy_dist * s;
up[X] = - c * eye[Z];
up[Y] = - s * eye[Z];
up[Z] = s * eye[Y] + c * eye[X];
normVec(up);
    // Define a coordinate system with the viewpoint at the origin.
for (i = 0; i < 3; i++)
{
z_axis[i] = eye[i] - center[i];
}
normVec(z_axis);
cross(up, z_axis, x_axis);
normVec(x_axis);
cross(z_axis, x_axis, y_axis);
gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]);
}
void display(void)
{
double nrml_vec[3];
light_pos[0] = (float)eye[X];
light_pos[1] = (float)eye[Y];
light_pos[2] = (float)eye[Z];
light_pos[3] = 1.0f;
runGPUKernel();
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_pos);
glEnable(GL_LIGHTING);
glMatrixMode(GL_PROJECTION);
glFrustum(-1000000, 1000000, -1000000, 1000000, -1000000, 1000000);
glLoadIdentity();
glOrtho(-vision, vision, -vision, vision, -1000, 1000);
glViewport(0, 0, window_width, window_height);
defineViewMatrix(phi, theta);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBegin(GL_QUADS);
#pragma omp parallel for
for (int k = 0 ; k < NUM_POINTS ; k++)
{
for (int i = 0 ; i < dev + 1 ; i ++)
{
for (int j = 0 ; j < 2 * dev + 1 ; j++)
{
normal(point[i * (dev-1) + j],point[(i + 1) * (dev-1) + j + 1],point[(i+1) * (dev-1) + j],nrml_vec);
glNormal3dv(nrml_vec);
glVertex3d(point[i * (dev-1) + j][X] + h_point[k][X], point[i * (dev-1) + j][Y] + h_point[k][Y], point[i * (dev-1) + j][Z] + h_point[k][Z]);
glVertex3d(point[(i + 1) * (dev-1) + j][X] + h_point[k][X],point[(i + 1) * (dev-1) + j][Y] + h_point[k][Y],point[(i + 1) * (dev-1) + j][Z] + h_point[k][Z]);
glVertex3d(point[(i + 1) * (dev-1) + j + 1][X] + h_point[k][X], point[(i + 1) * (dev-1) + j + 1][Y] + h_point[k][Y], point[(i + 1) * (dev-1) + j + 1][Z] + h_point[k][Z]);
glVertex3d(point[i * (dev-1) + j + 1][X] + h_point[k][X],point[i * (dev-1) + j + 1][Y] + h_point[k][Y],point[i * (dev-1) + j + 1][Z] + h_point[k][Z]);
}
}
}
glEnd();
glutSwapBuffers();
glutPostRedisplay();
}
void resize (int width, int height)
{
window_width = width;
window_height = height;
}
bool initGL(void)
{
glClearColor(0.0f, 0.0f , 0.0f, 0.5f);
glEnable(GL_DEPTH_TEST);
glClearDepth(1.0);
glDepthFunc(GL_LESS);
glEnable(GL_LIGHT0);
return true;
}
int main(int argc, char** argv)
{
double yangle,zangle;
double r;
point = (double **)malloc(sizeof(double *) * num_points);
for (int i = 0 ; i < num_points ; i++)
{
point[i] = (double *)malloc(sizeof(double) * 3);
}
for (int i = 0 ; i < dev + 1; i ++)
{
zangle = i * PI / dev;
r=R * sin(zangle);
for (int j = 0 ; j < dev + 1; j++)
{
yangle=j * PI * 2 / dev;
point[i * dev + j][X] = r * sin(yangle);
point[i * dev + j][Y] = r * cos(yangle);
point[i * dev + j][Z] = R * cos(zangle);
}
}
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(INIT_WIDTH, INIT_HEIGHT);
glutCreateWindow("3D CUDA Simulation");
glutDisplayFunc(display);
glutReshapeFunc(resize);
setInitialPosition();
if (!initGL())
return 1;
glutMainLoop();
cudaFree(d_point);
cudaFree(dv_point);
cudaDeviceReset();
for (int i = 0 ; i < num_points ; i++)
{
free (point[i]);
}
free (point);
return 0;
}
|
95f02b4734a1c4beac7e7464251dfab74117a4bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/deformable_psroi_pooling_op.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
static inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename T>
__device__ T bilinear_interpolation(const T* data, const T x, const T y,
const int width, const int height) {
int x1 = floor(x);
int x2 = ceil(x);
int y1 = floor(y);
int y2 = ceil(y);
T dist_x = static_cast<T>(x - x1);
T dist_y = static_cast<T>(y - y1);
T value11 = data[y1 * width + x1];
T value12 = data[y2 * width + x1];
T value21 = data[y1 * width + x2];
T value22 = data[y2 * width + x2];
T value = (1 - dist_x) * (1 - dist_y) * value11 +
(1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 +
dist_x * dist_y * value22;
return value;
}
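// Worked example (illustrative numbers only): for x = 1.25, y = 2.5 the four
// neighbours are (1,2), (1,3), (2,2), (2,3) with dist_x = 0.25, dist_y = 0.5,
// so the result is 0.375*v(1,2) + 0.375*v(1,3) + 0.125*v(2,2) + 0.125*v(2,3).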
template <typename T>
__global__ void DeformablePSROIPoolForwardKernel(
const int count, const T* bottom_data, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const T* bottom_rois,
const T* bottom_trans, const bool no_trans, const T trans_std,
const int sample_per_part, const int output_dim, const int group_height,
const int group_width, const int part_height, const int part_width,
const int num_classes, const int channels_each_class, T* top_data,
T* top_count, int* roi_batch_id_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
const T* offset_bottom_rois = bottom_rois + n * 4;
int roi_batch_ind = roi_batch_id_data[n];
// location of roi on feature map
T roi_start_w =
static_cast<T>(round(offset_bottom_rois[0])) * spatial_scale - 0.5;
T roi_start_h =
static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_end_w =
static_cast<T>(round(offset_bottom_rois[2]) + 1.) * spatial_scale - 0.5;
T roi_end_h =
static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
// width and height of roi
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// width and height of each bin
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
    // sampling interval in each bin
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
// obtain offset of roi
int part_h = floor(static_cast<T>(ph) / pooled_height * part_height);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_width);
int class_id = ctop / channels_each_class;
T trans_x =
no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2) * part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
T trans_y = no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2 + 1) *
part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
// location of start after adding offset
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
T sum = 0;
int count = 0;
int gw = floor(static_cast<T>(pw) * group_width / pooled_width);
int gh = floor(static_cast<T>(ph) * group_height / pooled_height);
gw = min(max(gw, 0), group_width - 1);
gh = min(max(gh, 0), group_height - 1);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels) * height * width;
// sampling in each bin
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_height + gh) * group_width + gw;
// bilinear interpolation
T val = bilinear_interpolation(offset_bottom_data + c * height * width,
w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? static_cast<T>(0) : sum / count;
top_count[index] = count;
}
}
template <typename DeviceContext, typename T>
class DeformablePSROIPoolCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
const Tensor* trans = ctx.Input<Tensor>("Trans");
Tensor* out = ctx.Output<Tensor>("Output");
out->mutable_data<T>(ctx.GetPlace());
Tensor* top_count = ctx.Output<Tensor>("TopCount");
top_count->mutable_data<T>(ctx.GetPlace());
auto no_trans = ctx.Attr<bool>("no_trans");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto output_dim = ctx.Attr<int>("output_dim");
auto group_size = ctx.Attr<std::vector<int>>("group_size");
auto group_height = group_size[0];
auto group_width = group_size[1];
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto part_size = ctx.Attr<std::vector<int>>("part_size");
auto part_height = part_size[0];
auto part_width = part_size[1];
auto sample_per_part = ctx.Attr<int>("sample_per_part");
auto trans_std = ctx.Attr<float>("trans_std");
const int batch = static_cast<int>(input->dims()[0]);
const int channels = static_cast<int>(input->dims()[1]);
const int height = static_cast<int>(input->dims()[2]);
const int width = static_cast<int>(input->dims()[3]);
const int channels_trans = no_trans ? 2 : trans->dims()[1];
const int num_rois = rois->dims()[0];
PADDLE_ENFORCE_EQ(num_rois, out->dims()[0],
"number of rois should be same with number of output");
const int count = num_rois * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class =
no_trans ? output_dim : output_dim / num_classes;
PADDLE_ENFORCE(channels_each_class >= 1,
"channels_each must greater than 1");
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
const T* bottom_trans = no_trans ? NULL : trans->data<T>();
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({num_rois});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
auto& dev_ctx = ctx.cuda_device_context();
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = allocator.Allocate(bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
T* top_data = out->mutable_data<T>(ctx.GetPlace());
T* top_count_data = top_count->mutable_data<T>(ctx.GetPlace());
hipLaunchKernelGGL(( DeformablePSROIPoolForwardKernel), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0,
dev_ctx.stream(),
count, bottom_data, (T)spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, bottom_trans, no_trans,
(T)trans_std, sample_per_part, output_dim, group_height, group_width,
part_height, part_width, num_classes, channels_each_class, top_data,
top_count_data, roi_id_data);
}
};
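// Backward kernel: recomputes each sample position exactly as in the forward
// pass, scatters the averaged output gradient to the four bilinear neighbours
// of every sample via atomic adds, and accumulates the gradient with respect
// to the learned offsets when a trans gradient buffer is provided.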
template <typename T>
__global__ void DeformablePSROIPoolBackwardAccKernel(
const int count, const T* top_diff, const T* top_count, const int num_rois,
const T spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int output_dim, T* bottom_data_diff, T* bottom_trans_diff,
const T* bottom_data, const T* bottom_rois, const T* bottom_trans,
const bool no_trans, const T trans_std, const int sample_per_part,
const int group_height, const int group_width, const int part_height,
const int part_width, const int num_classes, const int channels_each_class,
int* roi_batch_id_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
int num_box = count / pooled_height / pooled_width / output_dim;
const T* offset_bottom_rois = bottom_rois + n * 4;
int roi_batch_ind = roi_batch_id_data[n];
// location of roi on feature map
T roi_start_w =
static_cast<T>(round(offset_bottom_rois[0])) * spatial_scale - 0.5;
T roi_start_h =
static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_end_w =
static_cast<T>(round(offset_bottom_rois[2]) + 1.) * spatial_scale - 0.5;
T roi_end_h =
static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
// width and height of roi
T roi_width = max(roi_end_w - roi_start_w, 0.1);
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// width and height of each bin
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// sampling interval in each bin
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
// obtain offset of roi
int part_h = floor(static_cast<T>(ph) / pooled_height * part_height);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_width);
int class_id = ctop / channels_each_class;
T trans_x =
no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2) * part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
T trans_y = no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2 + 1) *
part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
// location of start after adding offset
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0) {
continue;
}
T diff_val = top_diff[index] / top_count[index];
const T* offset_bottom_data =
bottom_data + roi_batch_ind * channels * height * width;
int gw = floor(static_cast<T>(pw) * group_width / pooled_width);
int gh = floor(static_cast<T>(ph) * group_height / pooled_height);
gw = min(max(gw, 0), group_width - 1);
gh = min(max(gh, 0), group_height - 1);
// sampling in each bin
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_height + gh) * group_width + gw;
int x0 = floor(w);
int x1 = ceil(w);
int y0 = floor(h);
int y1 = ceil(h);
// compute coefficient of gradient
T dist_x = w - x0, dist_y = h - y0;
T q00 = (1 - dist_x) * (1 - dist_y);
T q01 = (1 - dist_x) * dist_y;
T q10 = dist_x * (1 - dist_y);
T q11 = dist_x * dist_y;
int bottom_index_base = c * height * width;
// compute gradient of input
if (bottom_data_diff) {
platform::CudaAtomicAdd(
bottom_data_diff + roi_batch_ind * channels * height * width +
bottom_index_base + y0 * width + x0,
q00 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + roi_batch_ind * channels * height * width +
bottom_index_base + y1 * width + x0,
q01 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + roi_batch_ind * channels * height * width +
bottom_index_base + y0 * width + x1,
q10 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + roi_batch_ind * channels * height * width +
bottom_index_base + y1 * width + x1,
q11 * diff_val);
}
// compute gradient of trans
if (no_trans || bottom_trans_diff == NULL) {
continue;
}
T u00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
T u01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
T u10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
T u11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
T diff_x = (u11 * dist_y + u10 * (1 - dist_y) - u01 * dist_y -
u00 * (1 - dist_y)) *
trans_std * diff_val;
diff_x *= roi_width;
T diff_y = (u11 * dist_x + u01 * (1 - dist_x) - u10 * dist_x -
u00 * (1 - dist_x)) *
trans_std * diff_val;
diff_y *= roi_height;
platform::CudaAtomicAdd(
bottom_trans_diff +
(((n * num_classes + class_id) * 2) * part_height + part_h) *
part_width +
part_w,
diff_x);
platform::CudaAtomicAdd(
bottom_trans_diff +
(((n * num_classes + class_id) * 2 + 1) * part_height +
part_h) *
part_width +
part_w,
diff_y);
}
}
}
}
template <typename DeviceContext, typename T>
class DeformablePSROIPoolGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
const Tensor* trans = ctx.Input<Tensor>("Trans");
const Tensor* top_count = ctx.Input<Tensor>("TopCount");
const Tensor* output_grad =
ctx.Input<Tensor>(framework::GradVarName("Output"));
Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
Tensor* trans_grad = ctx.Output<Tensor>(framework::GradVarName("Trans"));
math::SetConstant<DeviceContext, T> set_zero;
auto& dev_ctx = ctx.cuda_device_context();
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, input_grad, static_cast<T>(0));
}
if (trans_grad) {
trans_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, trans_grad, static_cast<T>(0));
}
auto no_trans = ctx.Attr<bool>("no_trans");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto output_dim = ctx.Attr<int>("output_dim");
auto group_size = ctx.Attr<std::vector<int>>("group_size");
auto group_height = group_size[0];
auto group_width = group_size[1];
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto part_size = ctx.Attr<std::vector<int>>("part_size");
auto part_height = part_size[0];
auto part_width = part_size[1];
auto sample_per_part = ctx.Attr<int>("sample_per_part");
auto trans_std = ctx.Attr<float>("trans_std");
const int batch = static_cast<int>(input->dims()[0]);
const int channels = static_cast<int>(input->dims()[1]);
const int height = static_cast<int>(input->dims()[2]);
const int width = static_cast<int>(input->dims()[3]);
const int channels_trans = no_trans ? 2 : trans->dims()[1];
const int num_rois = rois->dims()[0];
const int count = num_rois * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class =
no_trans ? output_dim : output_dim / num_classes;
const T* top_diff = output_grad->data<T>();
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
const T* bottom_trans = no_trans ? NULL : trans->data<T>();
T* bottom_data_diff = NULL;
T* bottom_trans_diff = NULL;
if (input_grad) {
bottom_data_diff = input_grad->mutable_data<T>(ctx.GetPlace());
}
if (trans_grad) {
bottom_trans_diff =
no_trans ? NULL : trans_grad->mutable_data<T>(ctx.GetPlace());
}
const T* top_count_data = top_count->data<T>();
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({num_rois});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = allocator.Allocate(bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
hipLaunchKernelGGL(( DeformablePSROIPoolBackwardAccKernel), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS),
0, dev_ctx.stream(),
count, top_diff, top_count_data, num_rois, (T)spatial_scale, channels,
height, width, pooled_height, pooled_width, output_dim,
bottom_data_diff, bottom_trans_diff, bottom_data, bottom_rois,
bottom_trans, no_trans, (T)trans_std, sample_per_part, group_height,
group_width, part_height, part_width, num_classes, channels_each_class,
roi_id_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(deformable_psroi_pooling,
ops::DeformablePSROIPoolCUDAKernel<CUDA, float>,
ops::DeformablePSROIPoolCUDAKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(deformable_psroi_pooling_grad,
ops::DeformablePSROIPoolGradCUDAKernel<CUDA, float>,
ops::DeformablePSROIPoolGradCUDAKernel<CUDA, double>);
| 95f02b4734a1c4beac7e7464251dfab74117a4bb.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/deformable_psroi_pooling_op.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
static inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename T>
__device__ T bilinear_interpolation(const T* data, const T x, const T y,
const int width, const int height) {
int x1 = floor(x);
int x2 = ceil(x);
int y1 = floor(y);
int y2 = ceil(y);
T dist_x = static_cast<T>(x - x1);
T dist_y = static_cast<T>(y - y1);
T value11 = data[y1 * width + x1];
T value12 = data[y2 * width + x1];
T value21 = data[y1 * width + x2];
T value22 = data[y2 * width + x2];
T value = (1 - dist_x) * (1 - dist_y) * value11 +
(1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 +
dist_x * dist_y * value22;
return value;
}
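// Forward kernel: averages sample_per_part x sample_per_part bilinear samples
// from the offset-shifted bin of each output element (n, ctop, ph, pw) and
// records the sample count for the backward pass.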
template <typename T>
__global__ void DeformablePSROIPoolForwardKernel(
const int count, const T* bottom_data, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const T* bottom_rois,
const T* bottom_trans, const bool no_trans, const T trans_std,
const int sample_per_part, const int output_dim, const int group_height,
const int group_width, const int part_height, const int part_width,
const int num_classes, const int channels_each_class, T* top_data,
T* top_count, int* roi_batch_id_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
const T* offset_bottom_rois = bottom_rois + n * 4;
int roi_batch_ind = roi_batch_id_data[n];
// location of roi on feature map
T roi_start_w =
static_cast<T>(round(offset_bottom_rois[0])) * spatial_scale - 0.5;
T roi_start_h =
static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_end_w =
static_cast<T>(round(offset_bottom_rois[2]) + 1.) * spatial_scale - 0.5;
T roi_end_h =
static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
// width and height of roi
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// width and height of each bin
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
    // sampling interval in each bin
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
// obtain offset of roi
int part_h = floor(static_cast<T>(ph) / pooled_height * part_height);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_width);
int class_id = ctop / channels_each_class;
T trans_x =
no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2) * part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
T trans_y = no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2 + 1) *
part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
// location of start after adding offset
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
T sum = 0;
int count = 0;
int gw = floor(static_cast<T>(pw) * group_width / pooled_width);
int gh = floor(static_cast<T>(ph) * group_height / pooled_height);
gw = min(max(gw, 0), group_width - 1);
gh = min(max(gh, 0), group_height - 1);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels) * height * width;
// sampling in each bin
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_height + gh) * group_width + gw;
// bilinear interpolation
T val = bilinear_interpolation(offset_bottom_data + c * height * width,
w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? static_cast<T>(0) : sum / count;
top_count[index] = count;
}
}
template <typename DeviceContext, typename T>
class DeformablePSROIPoolCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
const Tensor* trans = ctx.Input<Tensor>("Trans");
Tensor* out = ctx.Output<Tensor>("Output");
out->mutable_data<T>(ctx.GetPlace());
Tensor* top_count = ctx.Output<Tensor>("TopCount");
top_count->mutable_data<T>(ctx.GetPlace());
auto no_trans = ctx.Attr<bool>("no_trans");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto output_dim = ctx.Attr<int>("output_dim");
auto group_size = ctx.Attr<std::vector<int>>("group_size");
auto group_height = group_size[0];
auto group_width = group_size[1];
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto part_size = ctx.Attr<std::vector<int>>("part_size");
auto part_height = part_size[0];
auto part_width = part_size[1];
auto sample_per_part = ctx.Attr<int>("sample_per_part");
auto trans_std = ctx.Attr<float>("trans_std");
const int batch = static_cast<int>(input->dims()[0]);
const int channels = static_cast<int>(input->dims()[1]);
const int height = static_cast<int>(input->dims()[2]);
const int width = static_cast<int>(input->dims()[3]);
const int channels_trans = no_trans ? 2 : trans->dims()[1];
const int num_rois = rois->dims()[0];
PADDLE_ENFORCE_EQ(num_rois, out->dims()[0],
"number of rois should be same with number of output");
const int count = num_rois * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class =
no_trans ? output_dim : output_dim / num_classes;
PADDLE_ENFORCE(channels_each_class >= 1,
"channels_each must greater than 1");
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
const T* bottom_trans = no_trans ? NULL : trans->data<T>();
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({num_rois});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
auto& dev_ctx = ctx.cuda_device_context();
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = allocator.Allocate(bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
T* top_data = out->mutable_data<T>(ctx.GetPlace());
T* top_count_data = top_count->mutable_data<T>(ctx.GetPlace());
DeformablePSROIPoolForwardKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0,
dev_ctx.stream()>>>(
count, bottom_data, (T)spatial_scale, channels, height, width,
pooled_height, pooled_width, bottom_rois, bottom_trans, no_trans,
(T)trans_std, sample_per_part, output_dim, group_height, group_width,
part_height, part_width, num_classes, channels_each_class, top_data,
top_count_data, roi_id_data);
}
};
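// Backward kernel: replays the forward sampling, scattering the averaged
// output gradient to the four bilinear neighbours of each sample with atomic
// adds and accumulating the gradient of the learned offsets.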
template <typename T>
__global__ void DeformablePSROIPoolBackwardAccKernel(
const int count, const T* top_diff, const T* top_count, const int num_rois,
const T spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int output_dim, T* bottom_data_diff, T* bottom_trans_diff,
const T* bottom_data, const T* bottom_rois, const T* bottom_trans,
const bool no_trans, const T trans_std, const int sample_per_part,
const int group_height, const int group_width, const int part_height,
const int part_width, const int num_classes, const int channels_each_class,
int* roi_batch_id_data) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
int num_box = count / pooled_height / pooled_width / output_dim;
const T* offset_bottom_rois = bottom_rois + n * 4;
int roi_batch_ind = roi_batch_id_data[n];
// location of roi on feature map
T roi_start_w =
static_cast<T>(round(offset_bottom_rois[0])) * spatial_scale - 0.5;
T roi_start_h =
static_cast<T>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
T roi_end_w =
static_cast<T>(round(offset_bottom_rois[2]) + 1.) * spatial_scale - 0.5;
T roi_end_h =
static_cast<T>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
// width and height of roi
T roi_width = max(roi_end_w - roi_start_w, 0.1);
T roi_height = max(roi_end_h - roi_start_h, 0.1);
// width and height of each bin
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// sampling interval in each bin
T sub_bin_size_h = bin_size_h / static_cast<T>(sample_per_part);
T sub_bin_size_w = bin_size_w / static_cast<T>(sample_per_part);
// obtain offset of roi
int part_h = floor(static_cast<T>(ph) / pooled_height * part_height);
int part_w = floor(static_cast<T>(pw) / pooled_width * part_width);
int class_id = ctop / channels_each_class;
T trans_x =
no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2) * part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
T trans_y = no_trans
? static_cast<T>(0)
: bottom_trans[(((n * num_classes + class_id) * 2 + 1) *
part_height +
part_h) *
part_width +
part_w] *
static_cast<T>(trans_std);
// location of start after adding offset
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
wstart += trans_x * roi_width;
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0) {
continue;
}
T diff_val = top_diff[index] / top_count[index];
const T* offset_bottom_data =
bottom_data + roi_batch_ind * channels * height * width;
int gw = floor(static_cast<T>(pw) * group_width / pooled_width);
int gh = floor(static_cast<T>(ph) * group_height / pooled_height);
gw = min(max(gw, 0), group_width - 1);
gh = min(max(gh, 0), group_height - 1);
// sampling in each bin
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
T w = wstart + iw * sub_bin_size_w;
T h = hstart + ih * sub_bin_size_h;
if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop * group_height + gh) * group_width + gw;
int x0 = floor(w);
int x1 = ceil(w);
int y0 = floor(h);
int y1 = ceil(h);
// compute coefficient of gradient
T dist_x = w - x0, dist_y = h - y0;
T q00 = (1 - dist_x) * (1 - dist_y);
T q01 = (1 - dist_x) * dist_y;
T q10 = dist_x * (1 - dist_y);
T q11 = dist_x * dist_y;
int bottom_index_base = c * height * width;
// compute gradient of input
if (bottom_data_diff) {
platform::CudaAtomicAdd(
bottom_data_diff + roi_batch_ind * channels * height * width +
bottom_index_base + y0 * width + x0,
q00 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + roi_batch_ind * channels * height * width +
bottom_index_base + y1 * width + x0,
q01 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + roi_batch_ind * channels * height * width +
bottom_index_base + y0 * width + x1,
q10 * diff_val);
platform::CudaAtomicAdd(
bottom_data_diff + roi_batch_ind * channels * height * width +
bottom_index_base + y1 * width + x1,
q11 * diff_val);
}
// compute gradient of trans
if (no_trans || bottom_trans_diff == NULL) {
continue;
}
T u00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
T u01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
T u10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
T u11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
T diff_x = (u11 * dist_y + u10 * (1 - dist_y) - u01 * dist_y -
u00 * (1 - dist_y)) *
trans_std * diff_val;
diff_x *= roi_width;
T diff_y = (u11 * dist_x + u01 * (1 - dist_x) - u10 * dist_x -
u00 * (1 - dist_x)) *
trans_std * diff_val;
diff_y *= roi_height;
platform::CudaAtomicAdd(
bottom_trans_diff +
(((n * num_classes + class_id) * 2) * part_height + part_h) *
part_width +
part_w,
diff_x);
platform::CudaAtomicAdd(
bottom_trans_diff +
(((n * num_classes + class_id) * 2 + 1) * part_height +
part_h) *
part_width +
part_w,
diff_y);
}
}
}
}
template <typename DeviceContext, typename T>
class DeformablePSROIPoolGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const LoDTensor* rois = ctx.Input<LoDTensor>("ROIs");
const Tensor* trans = ctx.Input<Tensor>("Trans");
const Tensor* top_count = ctx.Input<Tensor>("TopCount");
const Tensor* output_grad =
ctx.Input<Tensor>(framework::GradVarName("Output"));
Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
Tensor* trans_grad = ctx.Output<Tensor>(framework::GradVarName("Trans"));
math::SetConstant<DeviceContext, T> set_zero;
auto& dev_ctx = ctx.cuda_device_context();
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, input_grad, static_cast<T>(0));
}
if (trans_grad) {
trans_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, trans_grad, static_cast<T>(0));
}
auto no_trans = ctx.Attr<bool>("no_trans");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto output_dim = ctx.Attr<int>("output_dim");
auto group_size = ctx.Attr<std::vector<int>>("group_size");
auto group_height = group_size[0];
auto group_width = group_size[1];
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto part_size = ctx.Attr<std::vector<int>>("part_size");
auto part_height = part_size[0];
auto part_width = part_size[1];
auto sample_per_part = ctx.Attr<int>("sample_per_part");
auto trans_std = ctx.Attr<float>("trans_std");
const int batch = static_cast<int>(input->dims()[0]);
const int channels = static_cast<int>(input->dims()[1]);
const int height = static_cast<int>(input->dims()[2]);
const int width = static_cast<int>(input->dims()[3]);
const int channels_trans = no_trans ? 2 : trans->dims()[1];
const int num_rois = rois->dims()[0];
const int count = num_rois * output_dim * pooled_height * pooled_width;
const int num_classes = no_trans ? 1 : channels_trans / 2;
const int channels_each_class =
no_trans ? output_dim : output_dim / num_classes;
const T* top_diff = output_grad->data<T>();
const T* bottom_data = input->data<T>();
const T* bottom_rois = rois->data<T>();
const T* bottom_trans = no_trans ? NULL : trans->data<T>();
T* bottom_data_diff = NULL;
T* bottom_trans_diff = NULL;
if (input_grad) {
bottom_data_diff = input_grad->mutable_data<T>(ctx.GetPlace());
}
if (trans_grad) {
bottom_trans_diff =
no_trans ? NULL : trans_grad->mutable_data<T>(ctx.GetPlace());
}
const T* top_count_data = top_count->data<T>();
framework::Tensor roi_batch_id_list;
roi_batch_id_list.Resize({num_rois});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch,
"The rois_batch_size and imgs batch_size must be the same.");
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(num_rois, rois_num_with_lod,
"The rois_num from input and lod must be the same.");
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = allocator.Allocate(bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
DeformablePSROIPoolBackwardAccKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS,
0, dev_ctx.stream()>>>(
count, top_diff, top_count_data, num_rois, (T)spatial_scale, channels,
height, width, pooled_height, pooled_width, output_dim,
bottom_data_diff, bottom_trans_diff, bottom_data, bottom_rois,
bottom_trans, no_trans, (T)trans_std, sample_per_part, group_height,
group_width, part_height, part_width, num_classes, channels_each_class,
roi_id_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using CUDA = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(deformable_psroi_pooling,
ops::DeformablePSROIPoolCUDAKernel<CUDA, float>,
ops::DeformablePSROIPoolCUDAKernel<CUDA, double>);
REGISTER_OP_CUDA_KERNEL(deformable_psroi_pooling_grad,
ops::DeformablePSROIPoolGradCUDAKernel<CUDA, float>,
ops::DeformablePSROIPoolGradCUDAKernel<CUDA, double>);
|
7b55a3efb6d599ed995526c9e37b1809fe03aab5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
*/
#include "reader_impl.hpp"
#include "timezone.cuh"
#include <io/comp/gpuinflate.h>
#include <io/orc/orc.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_vector.hpp>
#include <algorithm>
#include <array>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
// Import functionality that's independent of legacy code
using namespace cudf::io::orc;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates ORC data kind to cuDF type enum
*/
constexpr type_id to_type_id(const orc::SchemaType &schema,
bool use_np_dtypes,
type_id timestamp_type_id,
bool decimals_as_float64)
{
switch (schema.kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL:
// There isn't an arbitrary-precision type in cuDF, so map as float or int
return (decimals_as_float64) ? type_id::FLOAT64 : type_id::INT64;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to ORC clock frequency
*/
constexpr int32_t to_clockrate(type_id timestamp_type_id)
{
switch (timestamp_type_id) {
case type_id::TIMESTAMP_SECONDS: return 1;
case type_id::TIMESTAMP_MILLISECONDS: return 1000;
case type_id::TIMESTAMP_MICROSECONDS: return 1000000;
case type_id::TIMESTAMP_NANOSECONDS: return 1000000000;
default: return 0;
}
}
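// Maps an ORC stream kind to the decoder's internal stream slot (CI_DATA,
// CI_DATA2, CI_DICTIONARY, CI_PRESENT or CI_INDEX). The returned skip_count
// packs stream-ordering counters into separate byte fields (used to track
// index ordering, see gather_stream_info); unneeded streams map to
// CI_NUM_STREAMS.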
constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child)
{
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(gpu::CI_NUM_STREAMS, 0);
}
}
} // namespace
namespace {
/**
* @brief Struct that maps ORC streams to columns
*/
struct orc_stream_info {
orc_stream_info() = default;
explicit orc_stream_info(
uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to start of compressed stripe data
uint32_t length; // length in file
uint32_t gdf_idx; // column index
uint32_t stripe_idx; // stripe index
};
/**
 * @brief Populates the stream/chunk information in the column descriptors
*/
size_t gather_stream_info(const size_t stripe_index,
const orc::StripeInformation *stripeinfo,
const orc::StripeFooter *stripefooter,
const std::vector<int> &orc2gdf,
const std::vector<int> &gdf2orc,
const std::vector<orc::SchemaType> types,
bool use_index,
size_t *num_dictionary_entries,
hostdevice_vector<gpu::ColumnDesc> &chunks,
std::vector<orc_stream_info> &stream_info)
{
const auto num_columns = gdf2orc.size();
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
for (const auto &stream : stripefooter->streams) {
if (stream.column >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto col = orc2gdf[stream.column];
if (col == -1) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[stream.column];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto &idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto &chunk = chunks[stripe_index * num_columns + col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto &chunk = chunks[stripe_index * num_columns + col];
const auto idx =
get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[stream.column]);
if (idx.first < gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[stream.column].dictionarySize;
*num_dictionary_entries += stripefooter->columns[stream.column].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
} // namespace
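// Decompresses all stripe streams in two device passes: the compressed block
// layout is parsed first to size a single scratch buffer, then the blocks are
// inflated (zlib or snappy) or copied, and the resulting uncompressed stream
// pointers are patched back into the per-column chunk descriptors.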
rmm::device_buffer reader::impl::decompress_stripe_data(
hostdevice_vector<gpu::ColumnDesc> &chunks,
const std::vector<rmm::device_buffer> &stripe_data,
const OrcDecompressor *decompressor,
std::vector<orc_stream_info> &stream_info,
size_t num_stripes,
rmm::device_vector<gpu::RowGroup> &row_groups,
size_t row_index_stride,
rmm::cuda_stream_view stream)
{
// Parse the columns' compressed info
hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream);
for (const auto &info : stream_info) {
compinfo.insert(gpu::CompressedStreamInfo(
static_cast<const uint8_t *>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
CUDA_TRY(hipMemcpyAsync(compinfo.device_ptr(),
compinfo.host_ptr(),
compinfo.memory_size(),
hipMemcpyHostToDevice,
stream.value()));
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
CUDA_TRY(hipMemcpyAsync(compinfo.host_ptr(),
compinfo.device_ptr(),
compinfo.memory_size(),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
// Count the exact number of compressed blocks
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decomp_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found");
rmm::device_buffer decomp_data(total_decomp_size, stream);
rmm::device_vector<gpu_inflate_input_s> inflate_in(num_compressed_blocks +
num_uncompressed_blocks);
rmm::device_vector<gpu_inflate_status_s> inflate_out(num_compressed_blocks);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t *>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data().get() + start_pos;
compinfo[i].decstatus = inflate_out.data().get() + start_pos;
compinfo[i].copyctl = inflate_in.data().get() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
}
CUDA_TRY(hipMemcpyAsync(compinfo.device_ptr(),
compinfo.host_ptr(),
compinfo.memory_size(),
hipMemcpyHostToDevice,
stream.value()));
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(gpuinflate(
inflate_in.data().get(), inflate_out.data().get(), num_compressed_blocks, 0, stream));
break;
case orc::SNAPPY:
CUDA_TRY(gpu_unsnap(
inflate_in.data().get(), inflate_out.data().get(), num_compressed_blocks, stream));
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(
inflate_in.data().get() + num_compressed_blocks, num_uncompressed_blocks, stream));
}
gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream);
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
CUDA_TRY(hipMemcpyAsync(compinfo.host_ptr(),
compinfo.device_ptr(),
compinfo.memory_size(),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
const size_t num_columns = chunks.size() / num_stripes;
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (not row_groups.empty()) {
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
hipMemcpyHostToDevice,
stream.value()));
gpu::ParseRowGroupIndex(row_groups.data().get(),
compinfo.device_ptr(),
chunks.device_ptr(),
num_columns,
num_stripes,
row_groups.size() / num_columns,
row_index_stride,
stream);
}
return decomp_data;
}
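// Decodes the (decompressed) streams into the output column buffers: null
// masks and string dictionaries first, then the column data itself, with
// timestamps adjusted through the timezone table.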
void reader::impl::decode_stream_data(hostdevice_vector<gpu::ColumnDesc> &chunks,
size_t num_dicts,
size_t skip_rows,
size_t num_rows,
timezone_table const &tz_table,
const rmm::device_vector<gpu::RowGroup> &row_groups,
size_t row_index_stride,
std::vector<column_buffer> &out_buffers,
rmm::cuda_stream_view stream)
{
const auto num_columns = out_buffers.size();
const auto num_stripes = chunks.size() / out_buffers.size();
// Update chunks with pointers to column data
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
chunk.column_data_base = out_buffers[j].data();
chunk.valid_map_base = out_buffers[j].null_mask();
}
}
// Allocate global dictionary for deserializing
rmm::device_vector<gpu::DictionaryEntry> global_dict(num_dicts);
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
hipMemcpyHostToDevice,
stream.value()));
gpu::DecodeNullsAndStringDictionaries(chunks.device_ptr(),
global_dict.data().get(),
num_columns,
num_stripes,
num_rows,
skip_rows,
stream);
gpu::DecodeOrcColumnData(chunks.device_ptr(),
global_dict.data().get(),
num_columns,
num_stripes,
num_rows,
skip_rows,
tz_table.view(),
row_groups.data().get(),
row_groups.size() / num_columns,
row_index_stride,
stream);
CUDA_TRY(hipMemcpyAsync(chunks.host_ptr(),
chunks.device_ptr(),
chunks.memory_size(),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
out_buffers[j].null_count() += chunks[i * num_columns + j].null_count;
}
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _mr(mr), _source(std::move(source))
{
// Open and parse the source dataset metadata
_metadata = std::make_unique<cudf::io::orc::metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(options.get_columns(), _has_timestamp_column);
// Override output timestamp resolution if requested
if (options.get_timestamp_type().id() != type_id::EMPTY) {
_timestamp_type = options.get_timestamp_type();
}
// Enable or disable attempt to use row index for parsing
_use_index = options.is_enabled_use_index();
// Enable or disable the conversion to numpy-compatible dtypes
_use_np_dtypes = options.is_enabled_use_np_dtypes();
// Control decimals conversion (float64 or int64 with optional scale)
_decimals_as_float64 = options.is_enabled_decimals_as_float64();
_decimals_as_int_scale = options.get_forced_decimals_scale();
}
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
const std::vector<size_type> &stripes,
rmm::cuda_stream_view stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Select only stripes required (aka row groups)
const auto selected_stripes = _metadata->select_stripes(stripes, skip_rows, num_rows);
// Association between each ORC column and its cudf::column
std::vector<int32_t> orc_col_map(_metadata->get_num_columns(), -1);
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : _selected_columns) {
auto col_type = to_type_id(
_metadata->ff.types[col], _use_np_dtypes, _timestamp_type.id(), _decimals_as_float64);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
// Map each ORC column to its column
orc_col_map[col] = column_types.size() - 1;
}
// If no rows or stripes to read, return empty columns
if (num_rows <= 0 || selected_stripes.empty()) {
std::transform(column_types.cbegin(),
column_types.cend(),
std::back_inserter(out_columns),
[](auto const &dtype) { return make_empty_column(dtype); });
} else {
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_stripes.size() * num_columns;
hostdevice_vector<gpu::ColumnDesc> chunks(num_chunks, stream);
memset(chunks.host_ptr(), 0, chunks.memory_size());
const bool use_index =
(_use_index == true) &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > _metadata->get_row_index_stride() && !(_metadata->get_row_index_stride() & 7) &&
_metadata->get_row_index_stride() > 0 && num_columns * selected_stripes.size() < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> stripe_data;
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
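    // For every selected stripe, gather its stream descriptors, read the raw
    // stripe bytes into device memory (coalescing adjacent streams into one
    // host read), and fill in the per-column chunk descriptors.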
for (size_t i = 0; i < selected_stripes.size(); ++i) {
const auto stripe_info = selected_stripes[i].first;
const auto stripe_footer = selected_stripes[i].second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(i,
stripe_info,
stripe_footer,
orc_col_map,
_selected_columns,
_metadata->ff.types,
use_index,
&num_dict_entries,
chunks,
stream_info);
CUDF_EXPECTS(total_data_size > 0, "Expected streams data within stripe");
stripe_data.emplace_back(total_data_size, stream);
auto dst_base = static_cast<uint8_t *>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
const auto buffer = _source->host_read(offset, len);
CUDA_TRY(
hipMemcpyAsync(d_dst, buffer->data(), len, hipMemcpyHostToDevice, stream.value()));
stream.synchronize();
}
// Update chunks to reference streams pointers
for (size_t j = 0; j < num_columns; j++) {
auto &chunk = chunks[i * num_columns + j];
chunk.start_row = stripe_start_row;
chunk.num_rows = stripe_info->numberOfRows;
chunk.encoding_kind = stripe_footer->columns[_selected_columns[j]].kind;
chunk.type_kind = _metadata->ff.types[_selected_columns[j]].kind;
if (_decimals_as_float64) {
chunk.decimal_scale =
_metadata->ff.types[_selected_columns[j]].scale | orc::gpu::orc_decimal2float64_scale;
} else if (_decimals_as_int_scale < 0) {
chunk.decimal_scale = _metadata->ff.types[_selected_columns[j]].scale;
} else {
chunk.decimal_scale = _decimals_as_int_scale;
}
chunk.rowgroup_id = num_rowgroups;
chunk.dtype_len = (column_types[j].id() == type_id::STRING)
? sizeof(std::pair<const char *, size_t>)
: cudf::size_of(column_types[j]);
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.ts_clock_rate = to_clockrate(_timestamp_type.id());
}
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
if (chunk.strm_len[k] > 0) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += stripe_info->numberOfRows;
if (use_index) {
num_rowgroups += (stripe_info->numberOfRows + _metadata->get_row_index_stride() - 1) /
_metadata->get_row_index_stride();
}
}
// Process dataset chunk pages into output columns
if (stripe_data.size() != 0) {
// Setup row group descriptors if using indexes
rmm::device_vector<gpu::RowGroup> row_groups(num_rowgroups * num_columns);
if (_metadata->ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(chunks,
stripe_data,
_metadata->decompressor.get(),
stream_info,
selected_stripes.size(),
row_groups,
_metadata->get_row_index_stride(),
stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (not row_groups.empty()) {
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
hipMemcpyHostToDevice,
stream.value()));
gpu::ParseRowGroupIndex(row_groups.data().get(),
nullptr,
chunks.device_ptr(),
num_columns,
selected_stripes.size(),
num_rowgroups,
_metadata->get_row_index_stride(),
stream);
}
}
// Setup table for converting timestamp columns from local to UTC time
auto const tz_table =
_has_timestamp_column
? build_timezone_transition_table(selected_stripes[0].second->writerTimezone)
: timezone_table{};
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (size_t j = 0; j < selected_stripes.size(); ++j) {
if (chunks[j * num_columns + i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_stream_data(chunks,
num_dict_entries,
skip_rows,
num_rows,
tz_table,
row_groups,
_metadata->get_row_index_stride(),
out_buffers,
stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _metadata->get_column_name(_selected_columns[i]);
}
// Return user metadata
for (const auto &kv : _metadata->ff.metadata) {
out_metadata.user_data.insert({kv.name, kv.value});
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(datasource::create(filepaths[0]), options, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(orc_reader_options const &options, rmm::cuda_stream_view stream)
{
return _impl->read(
options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream);
}
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
| 7b55a3efb6d599ed995526c9e37b1809fe03aab5.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
*/
#include "reader_impl.hpp"
#include "timezone.cuh"
#include <io/comp/gpuinflate.h>
#include <io/orc/orc.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_vector.hpp>
#include <algorithm>
#include <array>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
// Import functionality that's independent of legacy code
using namespace cudf::io::orc;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates ORC data kind to cuDF type enum
*/
constexpr type_id to_type_id(const orc::SchemaType &schema,
bool use_np_dtypes,
type_id timestamp_type_id,
bool decimals_as_float64)
{
switch (schema.kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL:
// There isn't an arbitrary-precision type in cuDF, so map as float or int
return (decimals_as_float64) ? type_id::FLOAT64 : type_id::INT64;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to ORC clock frequency
*/
constexpr int32_t to_clockrate(type_id timestamp_type_id)
{
switch (timestamp_type_id) {
case type_id::TIMESTAMP_SECONDS: return 1;
case type_id::TIMESTAMP_MILLISECONDS: return 1000;
case type_id::TIMESTAMP_MICROSECONDS: return 1000000;
case type_id::TIMESTAMP_NANOSECONDS: return 1000000000;
default: return 0;
}
}
constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child)
{
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(gpu::CI_NUM_STREAMS, 0);
}
}
} // namespace
namespace {
/**
* @brief Struct that maps ORC streams to columns
*/
struct orc_stream_info {
orc_stream_info() = default;
explicit orc_stream_info(
uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to start of compressed stripe data
uint32_t length; // length in file
uint32_t gdf_idx; // column index
uint32_t stripe_idx; // stripe index
};
/**
 * @brief Function that populates the column descriptors' stream/chunk association
*/
size_t gather_stream_info(const size_t stripe_index,
const orc::StripeInformation *stripeinfo,
const orc::StripeFooter *stripefooter,
const std::vector<int> &orc2gdf,
const std::vector<int> &gdf2orc,
const std::vector<orc::SchemaType> types,
bool use_index,
size_t *num_dictionary_entries,
hostdevice_vector<gpu::ColumnDesc> &chunks,
std::vector<orc_stream_info> &stream_info)
{
const auto num_columns = gdf2orc.size();
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
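  // Walk every stream in the stripe footer, accumulating source/destination
  // offsets and recording, for each selected column, the stream ids/lengths
  // that the decode kernels will need.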
for (const auto &stream : stripefooter->streams) {
if (stream.column >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto col = orc2gdf[stream.column];
if (col == -1) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[stream.column];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto &idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto &chunk = chunks[stripe_index * num_columns + col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto &chunk = chunks[stripe_index * num_columns + col];
const auto idx =
get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[stream.column]);
if (idx.first < gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[stream.column].dictionarySize;
*num_dictionary_entries += stripefooter->columns[stream.column].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
} // namespace
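/**
 * @brief Decompresses the stripe data, updating each chunk's stream pointers
 * to the uncompressed buffers, and optionally parses the row group index.
 */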
rmm::device_buffer reader::impl::decompress_stripe_data(
hostdevice_vector<gpu::ColumnDesc> &chunks,
const std::vector<rmm::device_buffer> &stripe_data,
const OrcDecompressor *decompressor,
std::vector<orc_stream_info> &stream_info,
size_t num_stripes,
rmm::device_vector<gpu::RowGroup> &row_groups,
size_t row_index_stride,
rmm::cuda_stream_view stream)
{
// Parse the columns' compressed info
hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream);
for (const auto &info : stream_info) {
compinfo.insert(gpu::CompressedStreamInfo(
static_cast<const uint8_t *>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
CUDA_TRY(cudaMemcpyAsync(compinfo.device_ptr(),
compinfo.host_ptr(),
compinfo.memory_size(),
cudaMemcpyHostToDevice,
stream.value()));
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
CUDA_TRY(cudaMemcpyAsync(compinfo.host_ptr(),
compinfo.device_ptr(),
compinfo.memory_size(),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
  // Count the exact number of compressed and uncompressed blocks and the total decompressed size
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decomp_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found");
rmm::device_buffer decomp_data(total_decomp_size, stream);
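  // Batched inflate inputs: descriptors for compressed blocks come first,
  // followed by descriptors for uncompressed blocks that are copied through.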
rmm::device_vector<gpu_inflate_input_s> inflate_in(num_compressed_blocks +
num_uncompressed_blocks);
rmm::device_vector<gpu_inflate_status_s> inflate_out(num_compressed_blocks);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t *>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data().get() + start_pos;
compinfo[i].decstatus = inflate_out.data().get() + start_pos;
compinfo[i].copyctl = inflate_in.data().get() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
}
CUDA_TRY(cudaMemcpyAsync(compinfo.device_ptr(),
compinfo.host_ptr(),
compinfo.memory_size(),
cudaMemcpyHostToDevice,
stream.value()));
gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream);
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(gpuinflate(
inflate_in.data().get(), inflate_out.data().get(), num_compressed_blocks, 0, stream));
break;
case orc::SNAPPY:
CUDA_TRY(gpu_unsnap(
inflate_in.data().get(), inflate_out.data().get(), num_compressed_blocks, stream));
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(
inflate_in.data().get() + num_compressed_blocks, num_uncompressed_blocks, stream));
}
gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream);
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
CUDA_TRY(cudaMemcpyAsync(compinfo.host_ptr(),
compinfo.device_ptr(),
compinfo.memory_size(),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
const size_t num_columns = chunks.size() / num_stripes;
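  // Re-point each chunk's streams at the decompressed buffers and replace the
  // stream lengths with the actual uncompressed sizes.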
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (not row_groups.empty()) {
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
cudaMemcpyHostToDevice,
stream.value()));
gpu::ParseRowGroupIndex(row_groups.data().get(),
compinfo.device_ptr(),
chunks.device_ptr(),
num_columns,
num_stripes,
row_groups.size() / num_columns,
row_index_stride,
stream);
}
return decomp_data;
}
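/**
 * @brief Decodes the stripe data into the output column buffers and
 * accumulates their null counts.
 */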
void reader::impl::decode_stream_data(hostdevice_vector<gpu::ColumnDesc> &chunks,
size_t num_dicts,
size_t skip_rows,
size_t num_rows,
timezone_table const &tz_table,
const rmm::device_vector<gpu::RowGroup> &row_groups,
size_t row_index_stride,
std::vector<column_buffer> &out_buffers,
rmm::cuda_stream_view stream)
{
const auto num_columns = out_buffers.size();
const auto num_stripes = chunks.size() / out_buffers.size();
// Update chunks with pointers to column data
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
chunk.column_data_base = out_buffers[j].data();
chunk.valid_map_base = out_buffers[j].null_mask();
}
}
// Allocate global dictionary for deserializing
rmm::device_vector<gpu::DictionaryEntry> global_dict(num_dicts);
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
cudaMemcpyHostToDevice,
stream.value()));
gpu::DecodeNullsAndStringDictionaries(chunks.device_ptr(),
global_dict.data().get(),
num_columns,
num_stripes,
num_rows,
skip_rows,
stream);
gpu::DecodeOrcColumnData(chunks.device_ptr(),
global_dict.data().get(),
num_columns,
num_stripes,
num_rows,
skip_rows,
tz_table.view(),
row_groups.data().get(),
row_groups.size() / num_columns,
row_index_stride,
stream);
CUDA_TRY(cudaMemcpyAsync(chunks.host_ptr(),
chunks.device_ptr(),
chunks.memory_size(),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
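  // Accumulate the per-stripe null counts into the output buffers.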
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
out_buffers[j].null_count() += chunks[i * num_columns + j].null_count;
}
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _mr(mr), _source(std::move(source))
{
// Open and parse the source dataset metadata
_metadata = std::make_unique<cudf::io::orc::metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(options.get_columns(), _has_timestamp_column);
// Override output timestamp resolution if requested
if (options.get_timestamp_type().id() != type_id::EMPTY) {
_timestamp_type = options.get_timestamp_type();
}
// Enable or disable attempt to use row index for parsing
_use_index = options.is_enabled_use_index();
// Enable or disable the conversion to numpy-compatible dtypes
_use_np_dtypes = options.is_enabled_use_np_dtypes();
// Control decimals conversion (float64 or int64 with optional scale)
_decimals_as_float64 = options.is_enabled_decimals_as_float64();
_decimals_as_int_scale = options.get_forced_decimals_scale();
}
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
const std::vector<size_type> &stripes,
rmm::cuda_stream_view stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Select only stripes required (aka row groups)
const auto selected_stripes = _metadata->select_stripes(stripes, skip_rows, num_rows);
// Association between each ORC column and its cudf::column
std::vector<int32_t> orc_col_map(_metadata->get_num_columns(), -1);
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : _selected_columns) {
auto col_type = to_type_id(
_metadata->ff.types[col], _use_np_dtypes, _timestamp_type.id(), _decimals_as_float64);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
// Map each ORC column to its column
orc_col_map[col] = column_types.size() - 1;
}
// If no rows or stripes to read, return empty columns
if (num_rows <= 0 || selected_stripes.empty()) {
std::transform(column_types.cbegin(),
column_types.cend(),
std::back_inserter(out_columns),
[](auto const &dtype) { return make_empty_column(dtype); });
} else {
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_stripes.size() * num_columns;
hostdevice_vector<gpu::ColumnDesc> chunks(num_chunks, stream);
memset(chunks.host_ptr(), 0, chunks.memory_size());
const bool use_index =
(_use_index == true) &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > _metadata->get_row_index_stride() && !(_metadata->get_row_index_stride() & 7) &&
_metadata->get_row_index_stride() > 0 && num_columns * selected_stripes.size() < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> stripe_data;
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
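    // Read each selected stripe's data (coalescing adjacent streams into a
    // single read) and fill in the per-stripe, per-column chunk descriptors.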
for (size_t i = 0; i < selected_stripes.size(); ++i) {
const auto stripe_info = selected_stripes[i].first;
const auto stripe_footer = selected_stripes[i].second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(i,
stripe_info,
stripe_footer,
orc_col_map,
_selected_columns,
_metadata->ff.types,
use_index,
&num_dict_entries,
chunks,
stream_info);
CUDF_EXPECTS(total_data_size > 0, "Expected streams data within stripe");
stripe_data.emplace_back(total_data_size, stream);
auto dst_base = static_cast<uint8_t *>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
const auto buffer = _source->host_read(offset, len);
CUDA_TRY(
cudaMemcpyAsync(d_dst, buffer->data(), len, cudaMemcpyHostToDevice, stream.value()));
stream.synchronize();
}
// Update chunks to reference streams pointers
for (size_t j = 0; j < num_columns; j++) {
auto &chunk = chunks[i * num_columns + j];
chunk.start_row = stripe_start_row;
chunk.num_rows = stripe_info->numberOfRows;
chunk.encoding_kind = stripe_footer->columns[_selected_columns[j]].kind;
chunk.type_kind = _metadata->ff.types[_selected_columns[j]].kind;
if (_decimals_as_float64) {
chunk.decimal_scale =
_metadata->ff.types[_selected_columns[j]].scale | orc::gpu::orc_decimal2float64_scale;
} else if (_decimals_as_int_scale < 0) {
chunk.decimal_scale = _metadata->ff.types[_selected_columns[j]].scale;
} else {
chunk.decimal_scale = _decimals_as_int_scale;
}
chunk.rowgroup_id = num_rowgroups;
chunk.dtype_len = (column_types[j].id() == type_id::STRING)
? sizeof(std::pair<const char *, size_t>)
: cudf::size_of(column_types[j]);
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.ts_clock_rate = to_clockrate(_timestamp_type.id());
}
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
if (chunk.strm_len[k] > 0) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += stripe_info->numberOfRows;
if (use_index) {
num_rowgroups += (stripe_info->numberOfRows + _metadata->get_row_index_stride() - 1) /
_metadata->get_row_index_stride();
}
}
// Process dataset chunk pages into output columns
if (stripe_data.size() != 0) {
// Setup row group descriptors if using indexes
rmm::device_vector<gpu::RowGroup> row_groups(num_rowgroups * num_columns);
if (_metadata->ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(chunks,
stripe_data,
_metadata->decompressor.get(),
stream_info,
selected_stripes.size(),
row_groups,
_metadata->get_row_index_stride(),
stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (not row_groups.empty()) {
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
cudaMemcpyHostToDevice,
stream.value()));
gpu::ParseRowGroupIndex(row_groups.data().get(),
nullptr,
chunks.device_ptr(),
num_columns,
selected_stripes.size(),
num_rowgroups,
_metadata->get_row_index_stride(),
stream);
}
}
// Setup table for converting timestamp columns from local to UTC time
auto const tz_table =
_has_timestamp_column
? build_timezone_transition_table(selected_stripes[0].second->writerTimezone)
: timezone_table{};
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (size_t j = 0; j < selected_stripes.size(); ++j) {
if (chunks[j * num_columns + i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_stream_data(chunks,
num_dict_entries,
skip_rows,
num_rows,
tz_table,
row_groups,
_metadata->get_row_index_stride(),
out_buffers,
stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(make_column(out_buffers[i], nullptr, stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _metadata->get_column_name(_selected_columns[i]);
}
// Return user metadata
for (const auto &kv : _metadata->ff.metadata) {
out_metadata.user_data.insert({kv.name, kv.value});
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::vector<std::string> const &filepaths,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(filepaths.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(datasource::create(filepaths[0]), options, mr);
}
// Forward to implementation
reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>> &&sources,
orc_reader_options const &options,
rmm::mr::device_memory_resource *mr)
{
CUDF_EXPECTS(sources.size() == 1, "Only a single source is currently supported.");
_impl = std::make_unique<impl>(std::move(sources[0]), options, mr);
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read(orc_reader_options const &options, rmm::cuda_stream_view stream)
{
return _impl->read(
options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream);
}
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
|
0ef67c0a19bdd4035e0103bbc1ff5dd89fc12b6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
////////////////////////////////////////////////////////////////////////////////
//
// NVIDIA CUDA implementation of Viola-Jones Object Detection Framework
//
// The algorithm and code are explained in the upcoming GPU Computing Gems
// chapter in detail:
//
// Anton Obukhov, "Haar Classifiers for Object Detection with CUDA"
// PDF URL placeholder
// email: [email protected], [email protected]
//
// Credits for help with the code to:
// Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov.
//
////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <cstdio>
#include "opencv2/core/cuda/warp.hpp"
#include "opencv2/core/cuda/warp_shuffle.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_OBJDETECT
# include "opencv2/objdetect.hpp"
# include "opencv2/objdetect/objdetect_c.h"
#endif
#include "opencv2/gpulegacy/NCV.hpp"
#include "opencv2/gpulegacy/NPP_staging.hpp"
#include "opencv2/gpulegacy/NCVHaarObjectDetection.hpp"
#include "NCVRuntimeTemplates.hpp"
#include "NCVAlg.hpp"
//==============================================================================
//
// BlockScan file
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
__device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
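    // On CC >= 3.0 use warp shuffle intrinsics; otherwise fall back to a
    // shared-memory scan over a zero-padded buffer.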
#if __CUDA_ARCH__ >= 300
const unsigned int laneId = cv::gpu::cudev::Warp::laneId();
    // inclusive scan using warp shuffle intrinsics
#pragma unroll
for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2)
{
const Ncv32u n = cv::gpu::cudev::shfl_up(idata, i);
if (laneId >= i)
idata += n;
}
return idata;
#else
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
#endif
}
__device__ __forceinline__ Ncv32u warpScanExclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
return warpScanInclusive(idata, s_Data) - idata;
}
template <Ncv32u tiNumScanThreads>
__device__ Ncv32u scan1Inclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
if (tiNumScanThreads > K_WARP_SIZE)
{
//Bottom-level inclusive warp scan
Ncv32u warpResult = warpScanInclusive(idata, s_Data);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
{
s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
{
//grab top warp elements
Ncv32u val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
}
        //add the exclusive-scanned warp totals back to each thread's inclusive warp result
__syncthreads();
return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
}
else
{
return warpScanInclusive(idata, s_Data);
}
}
//==============================================================================
//
// HaarClassifierCascade file
//
//==============================================================================
const Ncv32u MAX_GRID_DIM = 65535;
const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64;
#define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6
#define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2)
/** \internal
* Haar features solid array.
*/
texture<uint2, 1, hipReadModeElementType> texHaarFeatures;
/** \internal
* Haar classifiers flattened trees container.
 * Two parts: the first contains root nodes, the second contains nodes referred to by the root nodes.
 * Drawback: breaks tree locality (might cause more cache misses).
 * Advantage: no need to introduce an additional 32-bit field to index root node offsets.
*/
texture<uint4, 1, hipReadModeElementType> texHaarClassifierNodes;
texture<Ncv32u, 1, hipReadModeElementType> texIImage;
__device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages)
{
return d_Stages[iStage];
}
template <NcvBool tbCacheTextureCascade>
__device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes)
{
HaarClassifierNode128 tmpNode;
if (tbCacheTextureCascade)
{
tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode);
}
else
{
tmpNode = d_ClassifierNodes[iNode];
}
return tmpNode;
}
template <NcvBool tbCacheTextureCascade>
__device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features,
Ncv32f *weight,
Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight)
{
HaarFeature64 feature;
if (tbCacheTextureCascade)
{
feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature);
}
else
{
feature = d_Features[iFeature];
}
feature.getRect(rectX, rectY, rectWidth, rectHeight);
*weight = feature.getWeight();
}
template <NcvBool tbCacheTextureIImg>
__device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg)
{
if (tbCacheTextureIImg)
{
return tex1Dfetch(texIImage, x);
}
else
{
return d_IImg[x];
}
}
__device__ Ncv32u d_outMaskPosition;
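// Block-wide stream compaction: an inclusive scan ranks the passing threads,
// a single atomicAdd reserves space in the global output vector, and the
// surviving elements are then written out contiguously.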
__device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
__shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2];
__shared__ Ncv32u numPassed;
__shared__ Ncv32u outMaskOffset;
Ncv32u incScan = scan1Inclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem);
__syncthreads();
if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1)
{
numPassed = incScan;
outMaskOffset = atomicAdd(&d_outMaskPosition, incScan);
}
if (threadPassFlag)
{
Ncv32u excScan = incScan - threadPassFlag;
shmem[excScan] = threadElem;
}
__syncthreads();
if (threadIdx.x < numPassed)
{
vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x];
}
#endif
}
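// Anchor-parallel cascade evaluation: one thread per anchor walks the Haar
// stages in [startStageInc, endStageExc) and either writes its mask element
// directly or participates in block-level compaction of the surviving anchors.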
template <NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u y_offs;
Ncv32u x_offs;
Ncv32u maskOffset;
Ncv32u outMaskVal;
NcvBool bInactiveThread = false;
if (tbReadPixelIndexFromVector)
{
maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (maskOffset >= mask1Dlen)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
        if (!tbDoAtomicCompaction || (tbDoAtomicCompaction && !bInactiveThread))
{
outMaskVal = d_inMask[maskOffset];
y_offs = outMaskVal >> 16;
x_offs = outMaskVal & 0xFFFF;
}
}
else
{
y_offs = blockIdx.y;
x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (x_offs >= mask2Dstride)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
        if (!tbDoAtomicCompaction || (tbDoAtomicCompaction && !bInactiveThread))
{
maskOffset = y_offs * mask2Dstride + x_offs;
if ((x_offs >= anchorsRoi.width) ||
(!tbInitMaskPositively &&
d_inMask != d_outMask &&
d_inMask[maskOffset] == OBJDET_MASK_ELEMENT_INVALID_32U))
{
if (tbDoAtomicCompaction)
{
bInactiveThread = true;
}
else
{
d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U;
return;
}
}
outMaskVal = (y_offs << 16) | x_offs;
}
}
NcvBool bPass = true;
if (!tbDoAtomicCompaction || tbDoAtomicCompaction)
{
Ncv32f pixelStdDev = 0.0f;
if (!bInactiveThread)
pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++)
{
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset();
Ncv32f stageThreshold = curStage.getStageThreshold();
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u iNode = curRootNodeOffset;
if (bPass && !bInactiveThread)
{
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
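                            // Rectangle sum from the integral image: TL + BR - TR - BL.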
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset++;
}
if (curStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
__syncthreads();
if (!tbDoAtomicCompaction)
{
if (!tbReadPixelIndexFromVector ||
(tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask)))
{
d_outMask[maskOffset] = outMaskVal;
}
}
else
{
compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread,
outMaskVal,
d_outMask);
}
}
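// Classifier-parallel cascade evaluation: one block per surviving anchor; the
// block's threads split each stage's classifiers among themselves and their
// partial sums are combined with a block reduction before the stage threshold test.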
template <NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x;
if (maskOffset >= mask1Dlen)
{
return;
}
Ncv32u outMaskVal = d_inMask[maskOffset];
Ncv32u y_offs = outMaskVal >> 16;
Ncv32u x_offs = outMaskVal & 0xFFFF;
Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
NcvBool bPass = true;
for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++)
{
//this variable is subject to reduction
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x;
Ncv32f stageThreshold = curStage.getStageThreshold();
Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2;
for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++)
{
NcvBool bMoreNodesToTraverse = true;
if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage)
{
Ncv32u iNode = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
//TODO: fetch into shmem if size suffices. Shmem can be shared with reduce
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL;
}
Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum);
if (finalStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
if (!tbDoAtomicCompaction)
{
if (!bPass || d_inMask != d_outMask)
{
if (!threadIdx.x)
{
d_outMask[maskOffset] = outMaskVal;
}
}
}
else
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
if (bPass && !threadIdx.x)
{
Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1);
d_outMask[outMaskOffset] = outMaskVal;
}
#endif
}
}
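// Builds the initial anchor mask for the given pixel step, optionally restricted
// by the input mask, writing either directly or through atomic compaction of the
// valid anchors.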
template <NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction>
__global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
Ncv32u y_offs = blockIdx.y;
Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs;
Ncv32u y_offs_upsc = step * y_offs;
Ncv32u x_offs_upsc = step * x_offs;
Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc;
Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U;
if (x_offs_upsc < anchorsRoi.width &&
(!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U))
{
outElem = (y_offs_upsc << 16) | x_offs_upsc;
}
if (!tbDoAtomicCompaction)
{
d_outMask[outMaskOffset] = outElem;
}
else
{
compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U,
outElem,
d_outMask);
}
}
struct applyHaarClassifierAnchorParallelFunctor
{
dim3 gridConf, blockConf;
hipStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
hipLaunchKernelGGL(( applyHaarClassifierAnchorParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value,
Loki::TL::TypeAt<TList, 3>::Result::value,
Loki::TL::TypeAt<TList, 4>::Result::value >)
, dim3(gridConf), dim3(blockConf), 0, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, hipStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor>
::call( &functor,
tbInitMaskPositively,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbReadPixelIndexFromVector,
tbDoAtomicCompaction);
}
struct applyHaarClassifierClassifierParallelFunctor
{
dim3 gridConf, blockConf;
hipStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
hipLaunchKernelGGL(( applyHaarClassifierClassifierParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value >)
, dim3(gridConf), dim3(blockConf), 0, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, hipStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor>
::call( &functor,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbDoAtomicCompaction);
}
struct initializeMaskVectorFunctor
{
dim3 gridConf, blockConf;
hipStream_t cuStream;
//Kernel arguments are stored as members;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u step;
//Arguments are passed through the constructor
initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _step) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
step(_step)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
hipLaunchKernelGGL(( initializeMaskVector <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value >)
, dim3(gridConf), dim3(blockConf), 0, cuStream,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
}
};
void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, hipStream_t cuStream,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor>
::call( &functor,
tbMaskByInmask,
tbDoAtomicCompaction);
}
Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages)
{
Ncv32u i = 0;
for (; i<haar.NumStages; i++)
{
if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N)
{
break;
}
}
return i;
}
NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral,
NCVMatrix<Ncv32f> &d_weights,
NCVMatrixAlloc<Ncv32u> &d_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea,
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
hipDeviceProp_t &devProp,
hipStream_t cuStream)
{
ncvAssertReturn(integral.memType() == d_weights.memType()&&
integral.memType() == d_pixelMask.memType() &&
integral.memType() == gpuAllocator.memType() &&
(integral.memType() == NCVMemoryTypeDevice ||
integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height &&
d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height &&
integral.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
#if defined _SELF_TEST_
NCVStatus ncvStat;
NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width, integral.height, integral.pitch);
ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch);
ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch);
ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length);
ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length);
ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch);
ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
ncvStat = d_pixelMask.copySolid(h_pixelMask, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = integral.copySolid(h_integralImage, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_weights.copySolid(h_weights, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(0), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<d_pixelMask.stride(); j++)
{
if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width))
{
if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U)
{
h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j;
}
}
else
{
h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
NCV_SKIP_COND_END
#endif
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride());
ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length()));
ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2);
ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
Ncv32u *hp_zero = &hp_pool32u.ptr()[0];
Ncv32u *hp_numDet = &hp_pool32u.ptr()[1];
NCV_SKIP_COND_BEGIN
*hp_zero = 0;
*hp_numDet = 0;
NCV_SKIP_COND_END
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
NcvBool bTexCacheCascade = devProp.major < 2;
NcvBool bTexCacheIImg = true; //this works better even on Fermi so far
NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3);
NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask;
NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp;
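    // The two mask vectors are ping-ponged between passes: one holds the
    // current list of surviving anchors, the other receives the compacted
    // output of the next kernel launch.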
Ncv32u szNppCompactTmpBuf;
nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp);
if (bDoAtomicCompaction)
{
szNppCompactTmpBuf = 0;
}
NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf);
NCV_SKIP_COND_BEGIN
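    // Bind textures so that integral-image reads (and, on pre-Fermi devices,
    // cascade reads) go through the texture cache.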
if (bTexCacheIImg)
{
hipChannelFormatDesc cfdTexIImage;
cfdTexIImage = hipCreateChannelDesc<Ncv32u>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage,
(anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
if (bTexCacheCascade)
{
hipChannelFormatDesc cfdTexHaarFeatures;
hipChannelFormatDesc cfdTexHaarClassifierNodes;
cfdTexHaarFeatures = hipCreateChannelDesc<uint2>();
cfdTexHaarClassifierNodes = hipCreateChannelDesc<uint4>();
size_t alignmentOffset;
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarFeatures,
d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarClassifierNodes,
d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
Ncv32u stageStartAnchorParallel = 0;
Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL,
haar, h_HaarStages);
Ncv32u stageEndClassifierParallel = haar.NumStages;
if (stageMiddleSwitch == 0)
{
stageMiddleSwitch = 1;
}
//create stages subdivision for pixel-parallel processing
const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 7 : 1;
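    // With atomic compaction the surviving-anchor list is compacted in-kernel
    // every few stages; otherwise an NPP compaction pass runs after every stage.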
Ncv32u curStop = stageStartAnchorParallel;
std::vector<Ncv32u> pixParallelStageStops;
while (curStop < stageMiddleSwitch)
{
pixParallelStageStops.push_back(curStop);
curStop += compactEveryNstage;
}
if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2)
{
pixParallelStageStops[pixParallelStageStops.size()-1] =
(stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2;
}
pixParallelStageStops.push_back(stageMiddleSwitch);
Ncv32u pixParallelStageStopsIndex = 0;
if (pixelStep != 1 || bMaskElements)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
(anchorsRoi.height + pixelStep - 1) / pixelStep);
dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL);
if (gridInit.x == 0 || gridInit.y == 0)
{
numDetections = 0;
return NCV_SUCCESS;
}
initializeMaskVectorDynTemplate(bMaskElements,
bDoAtomicCompaction,
gridInit, blockInit, cuStream,
d_ptrNowData->ptr(),
d_ptrNowTmp->ptr(),
static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(),
anchorsRoi, pixelStep);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
swap(d_ptrNowData, d_ptrNowTmp);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR);
}
numDetections = *hp_numDet;
}
else
{
//
// 1. Run the first pixel-input pixel-parallel classifier for few stages
//
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
anchorsRoi.height);
dim3 block1(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
true, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid1,
block1,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
0,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
pixParallelStageStopsIndex++;
}
//
// 2. Run pixel-parallel stages
//
for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++)
{
if (numDetections == 0)
{
break;
}
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL);
if (numDetections > MAX_GRID_DIM)
{
grid2.x = MAX_GRID_DIM;
grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block2(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
false, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid2,
block2,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
//
// 3. Run all left stages in one stage-parallel kernel
//
if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid3(numDetections);
if (numDetections > MAX_GRID_DIM)
{
grid3.x = MAX_GRID_DIM;
grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL);
applyHaarClassifierClassifierParallelDynTemplate(
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
bDoAtomicCompaction, //tbDoAtomicCompaction
grid3,
block3,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
stageMiddleSwitch,
stageEndClassifierParallel,
scaleAreaPixels);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
if (d_ptrNowData != &d_vecPixelMask)
{
d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
#if defined _SELF_TEST_
ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections);
}
Ncv32u fpu_oldcw, fpu_cw;
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
Ncv32u numDetGold;
ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar,
h_HaarStages, h_HaarNodes, h_HaarFeatures,
bMaskElements, anchorsRoi, pixelStep, scaleArea);
ncvAssertReturnNcvStat(ncvStat);
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
bool bPass = true;
if (numDetGold != numDetections)
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections);
bPass = false;
}
else
{
for (Ncv32u i=0; i<::max(numDetGold, numDetections) && bPass; i++)
{
if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i])
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]);
bPass = false;
}
}
}
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED");
#endif
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
//==============================================================================
//
// HypothesesOperations file
//
//==============================================================================
const Ncv32u NUM_GROW_THREADS = 128;
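// A surviving detection is a packed pixel: the anchor's x coordinate occupies the low 16 bits
// and y the high 16 bits (e.g. 0x00120034 decodes to x=0x34, y=0x12). pixelToRect unpacks it
// and scales both the anchor and the classifier window to image coordinates.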
__device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, Ncv32u height, Ncv32f scale)
{
NcvRect32u res;
res.x = (Ncv32u)(scale * (pixel & 0xFFFF));
res.y = (Ncv32u)(scale * (pixel >> 16));
res.width = (Ncv32u)(scale * width);
res.height = (Ncv32u)(scale * height);
return res;
}
__global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements,
NcvRect32u *hypotheses,
Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale)
{
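// The launch grid may have been folded into two dimensions because gridDim.x is capped at
// 65535; recover the linear block index before addressing the detections array.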
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x;
if (elemAddr >= numElements)
{
return;
}
hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale);
}
NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale,
hipStream_t cuStream)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
dim3 block(NUM_GROW_THREADS);
dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( growDetectionsKernel), dim3(grid), dim3(block), 0, cuStream, pixelMask.ptr(), numDetsToCopy,
hypotheses.ptr() + totalDetections,
rectWidth, rectHeight, curScale);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
totalDetections += numDetsToCopy;
return ncvStat;
}
//==============================================================================
//
// Pipeline file
//
//==============================================================================
NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
NcvSize32u srcRoi,
NCVVector<NcvRect32u> &d_dstRects,
Ncv32u &dstNumRects,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvSize32u minObjSize,
Ncv32u minNeighbors, //default 4
Ncv32f scaleStep, //default 1.2f
Ncv32u pixelStep, //default 1
Ncv32u flags, //default NCVPipeObjDet_Default
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
hipDeviceProp_t &devProp,
hipStream_t cuStream)
{
ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() &&
d_srcImg.memType() == gpuAllocator.memType() &&
(d_srcImg.memType() == NCVMemoryTypeDevice ||
d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 &&
d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height &&
srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height &&
d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
//TODO: set NPP active stream to cuStream
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
Ncv32u integralWidth = d_srcImg.width() + 1;
Ncv32u integralHeight = d_srcImg.height() + 1;
NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVStatus nppStat;
Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;
nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, ::max(szTmpBufIntegral, szTmpBufSqIntegral));
ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
integral.ptr(), integral.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
dstNumRects = 0;
Ncv32u lastCheckedScale = 0;
NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0);
std::vector<Ncv32u> scalesVector;
NcvBool bFoundLargestFace = false;
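// Enumerate the integer scales to process: scaleIter grows geometrically by scaleStep,
// duplicates produced by integer truncation are skipped via lastCheckedScale, and the
// enumeration stops once the search ROI for a scale becomes empty.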
for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep)
{
Ncv32u scale = (Ncv32u)scaleIter;
if (lastCheckedScale == scale)
{
continue;
}
lastCheckedScale = scale;
if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width ||
haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height)
{
continue;
}
NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRo_i.width = srcRoi_.width + 1;
srcIIRo_i.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRo_i.width / scale;
scaledIIRoi.height = srcIIRo_i.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
if (searchRoi.width <= 0 || searchRoi.height <= 0)
{
break;
}
scalesVector.push_back(scale);
if (gpuAllocator.isCounting())
{
break;
}
}
if (bReverseTraverseScale)
{
std::reverse(scalesVector.begin(), scalesVector.end());
}
//TODO: handle _fair_scale_ flag
for (Ncv32u i=0; i<scalesVector.size(); i++)
{
Ncv32u scale = scalesVector[i];
NcvSize32u srcRoi_, scaledIIRoi, searchRoi;
NcvSize32u srcIIRoi;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRoi.width = srcRoi_.width + 1;
srcIIRoi.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
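// For this scale: decimate both integral images by 'scale', then compute the per-window
// standard deviation over the classifier window interior (HAAR_STDDEV_BORDER margin on each
// side); the cascade kernels use it to normalize the node thresholds.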
NCV_SKIP_COND_BEGIN
nppStat = nppiStDecimate_32u_C1R(
integral.ptr(), integral.pitch(),
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStDecimate_64u_C1R(
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
const NcvRect32u rect(
HAAR_STDDEV_BORDER,
HAAR_STDDEV_BORDER,
haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,
haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);
nppStat = nppiStRectStdDev_32f_C1R(
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
d_rectStdDev.ptr(), d_rectStdDev.pitch(),
NcvSize32u(searchRoi.width, searchRoi.height), rect,
(Ncv32f)scale*scale, true);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
Ncv32u detectionsOnThisScale;
ncvStat = ncvApplyHaarClassifierCascade_device(
d_scaledIntegralImage, d_rectStdDev, d_pixelMask,
detectionsOnThisScale,
haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,
searchRoi, pixelStep, (Ncv32f)scale*scale,
gpuAllocator, cpuAllocator, devProp, cuStream);
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_BEGIN
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment());
ncvStat = ncvGrowDetectionsVector_device(
d_vecPixelMask,
detectionsOnThisScale,
d_hypothesesIntermediate,
dstNumRects,
static_cast<Ncv32u>(d_hypothesesIntermediate.length()),
haar.ClassifierSize.width,
haar.ClassifierSize.height,
(Ncv32f)scale,
cuStream);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (dstNumRects == 0)
{
continue;
}
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
Ncv32u numStrongHypothesesNow = dstNumRects;
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
numStrongHypothesesNow,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (numStrongHypothesesNow > 0)
{
NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0];
for (Ncv32u j=1; j<numStrongHypothesesNow; j++)
{
if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width)
{
maxRect = h_hypothesesIntermediate.ptr()[j];
}
}
h_hypothesesIntermediate.ptr()[0] = maxRect;
dstNumRects = 1;
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
bFoundLargestFace = true;
break;
}
}
NCV_SKIP_COND_END
if (gpuAllocator.isCounting())
{
break;
}
}
NCVStatus ncvRetCode = NCV_SUCCESS;
NCV_SKIP_COND_BEGIN
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (!bFoundLargestFace)
{
dstNumRects = 0;
}
}
else
{
//TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left)
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
dstNumRects,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (dstNumRects > d_dstRects.length())
{
ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
dstNumRects = static_cast<Ncv32u>(d_dstRects.length());
}
if (dstNumRects != 0)
{
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
}
}
if (flags & NCVPipeObjDet_VisualizeInPlace)
{
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(),
d_srcImg.width(), d_srcImg.height(),
d_dstRects.ptr(), dstNumRects, 255, cuStream);
}
NCV_SKIP_COND_END
return ncvRetCode;
}
//==============================================================================
//
// Purely Host code: classifier IO, mock-ups
//
//==============================================================================
#ifdef _SELF_TEST_
#include <float.h>
#endif
NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage,
NCVMatrix<Ncv32f> &h_weights,
NCVMatrixAlloc<Ncv32u> &h_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea)
{
ncvAssertReturn(h_integralImage.memType() == h_weights.memType() &&
h_integralImage.memType() == h_pixelMask.memType() &&
(h_integralImage.memType() == NCVMemoryTypeHostPageable ||
h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() &&
h_HaarStages.memType() == h_HaarFeatures.memType() &&
(h_HaarStages.memType() == NCVMemoryTypeHostPageable ||
h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height &&
h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height &&
h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages &&
h_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
h_HaarFeatures.length() >= haar.NumFeatures &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
for (Ncv32u i=0; i<anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<h_pixelMask.stride(); j++)
{
if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width)
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
else
{
for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++)
{
Ncv32f curStageSum = 0.0f;
Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset();
if (iStage == 0)
{
if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
else
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j);
}
}
else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u curNodeOffset = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset];
HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures();
Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect];
Ncv32u rectX, rectY, rectWidth, rectHeight;
feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight);
Ncv32f rectWeight = feature.getWeight();
Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride();
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL];
Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR];
Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL];
Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR];
Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR;
curNodeVal += (Ncv32f)rectSum * rectWeight;
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
curNodeOffset = nextNodeDescriptor.getNextNodeOffset();
}
}
curRootNodeOffset++;
}
Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold();
if (curStageSum < tmpStageThreshold)
{
//drop
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
}
}
}
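// Surviving anchors hold packed (y << 16) | x values, which sort below the
// OBJDET_MASK_ELEMENT_INVALID_32U marker; sorting therefore groups them at the front and the
// first invalid element marks the detection count.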
std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride());
Ncv32u i = 0;
for (; i<anchorsRoi.height * h_pixelMask.stride(); i++)
{
if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
}
numDetections = i;
return NCV_SUCCESS;
}
NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
for (Ncv32u i=0; i<numDetsToCopy; i++)
{
hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale);
}
totalDetections += numDetsToCopy;
return ncvStat;
}
static NCVStatus loadFromXML(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
#ifndef HAVE_OPENCV_OBJDETECT
(void) filename;
(void) haar;
(void) haarStages;
(void) haarClassifierNodes;
(void) haarFeatures;
CV_Error(cv::Error::StsNotImplemented, "This functionality requires objdetect module");
return NCV_HAAR_XML_LOADING_EXCEPTION;
#else
NCVStatus ncvStat;
haar.NumStages = 0;
haar.NumClassifierRootNodes = 0;
haar.NumClassifierTotalNodes = 0;
haar.NumFeatures = 0;
haar.ClassifierSize.width = 0;
haar.ClassifierSize.height = 0;
haar.bHasStumpsOnly = true;
haar.bNeedsTiltedII = false;
Ncv32u curMaxTreeDepth = 0;
std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
haarStages.resize(0);
haarClassifierNodes.resize(0);
haarFeatures.resize(0);
cv::Ptr<CvHaarClassifierCascade> oldCascade = (CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0);
if (oldCascade.empty())
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
haar.ClassifierSize.width = oldCascade->orig_window_size.width;
haar.ClassifierSize.height = oldCascade->orig_window_size.height;
int stagesCount = oldCascade->count;
for(int s = 0; s < stagesCount; ++s) // by stages
{
HaarStage64 curStage;
curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));
curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold);
int treesCount = oldCascade->stage_classifier[s].count;
for(int t = 0; t < treesCount; ++t) // by trees
{
Ncv32u nodeId = 0;
CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t];
int nodesCount = tree->count;
for(int n = 0; n < nodesCount; ++n) //by features
{
CvHaarFeature* feature = &tree->haar_feature[n];
HaarClassifierNode128 curNode;
curNode.setThreshold(tree->threshold[n]);
NcvBool bIsLeftNodeLeaf = false;
NcvBool bIsRightNodeLeaf = false;
HaarClassifierNodeDescriptor32 nodeLeft;
if ( tree->left[n] <= 0 )
{
Ncv32f leftVal = tree->alpha[-tree->left[n]];
ncvStat = nodeLeft.create(leftVal);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
bIsLeftNodeLeaf = true;
}
else
{
Ncv32u leftNodeOffset = tree->left[n];
nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1));
haar.bHasStumpsOnly = false;
}
curNode.setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight;
if ( tree->right[n] <= 0 )
{
Ncv32f rightVal = tree->alpha[-tree->right[n]];
ncvStat = nodeRight.create(rightVal);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
bIsRightNodeLeaf = true;
}
else
{
Ncv32u rightNodeOffset = tree->right[n];
nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1));
haar.bHasStumpsOnly = false;
}
curNode.setRightNodeDesc(nodeRight);
Ncv32u tiltedVal = feature->tilted;
haar.bNeedsTiltedII = (tiltedVal != 0);
Ncv32u featureId = 0;
for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects
{
Ncv32u rectX = feature->rect[l].r.x;
Ncv32u rectY = feature->rect[l].r.y;
Ncv32u rectWidth = feature->rect[l].r.width;
Ncv32u rectHeight = feature->rect[l].r.height;
Ncv32f rectWeight = feature->rect[l].weight;
if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/)
break;
HaarFeature64 curFeature;
ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height);
curFeature.setWeight(rectWeight);
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
haarFeatures.push_back(curFeature);
featureId++;
}
HaarFeatureDescriptor32 tmpFeatureDesc;
ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf,
featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId);
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
curNode.setFeatureDesc(tmpFeatureDesc);
if (!nodeId)
{
//root node
haarClassifierNodes.push_back(curNode);
curMaxTreeDepth = 1;
}
else
{
//other node
h_TmpClassifierNotRootNodes.push_back(curNode);
curMaxTreeDepth++;
}
nodeId++;
}
}
curStage.setNumClassifierRootNodes(treesCount);
haarStages.push_back(curStage);
}
//fill in cascade stats
haar.NumStages = static_cast<Ncv32u>(haarStages.size());
haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size());
haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size());
haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size());
//merge root and non-root nodes into one classifier array
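// Child offsets of non-leaf descriptors were recorded as indices into h_TmpClassifierNotRootNodes
// during parsing; once the non-root nodes are appended behind the root nodes, those offsets must
// be rebased by offsetRoot (the number of root nodes).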
Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size());
for (Ncv32u i=0; i<haarClassifierNodes.size(); i++)
{
HaarFeatureDescriptor32 featureDesc = haarClassifierNodes[i].getFeatureDesc();
HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc();
if (!featureDesc.isLeftNodeLeaf())
{
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
nodeLeft.create(newOffset);
}
haarClassifierNodes[i].setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc();
if (!featureDesc.isRightNodeLeaf())
{
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
nodeRight.create(newOffset);
}
haarClassifierNodes[i].setRightNodeDesc(nodeRight);
}
for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++)
{
HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc();
HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc();
if (!featureDesc.isLeftNodeLeaf())
{
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
nodeLeft.create(newOffset);
}
h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc();
if (!featureDesc.isRightNodeLeaf())
{
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
nodeRight.create(newOffset);
}
h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight);
haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]);
}
return NCV_SUCCESS;
#endif
}
#define NVBIN_HAAR_SIZERESERVED 16
#define NVBIN_HAAR_VERSION 0x1
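// NVBIN layout, as read by loadFromNVBIN and written by ncvHaarStoreNVBIN_host below:
// Ncv32u version, Ncv32u total file size, padding up to NVBIN_HAAR_SIZERESERVED bytes, then
// NumStages, NumClassifierRootNodes, NumClassifierTotalNodes, NumFeatures (Ncv32u each),
// ClassifierSize (NcvSize32u), bNeedsTiltedII and bHasStumpsOnly (NcvBool each), followed by
// the HaarStage64, HaarClassifierNode128 and HaarFeature64 arrays back to back.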
static NCVStatus loadFromNVBIN(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
size_t readCount;
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
Ncv32u fsize;
readCount = fread(&fsize, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fseek(fp, 0, SEEK_END);
Ncv32u fsizeActual = ftell(fp);
ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR);
std::vector<unsigned char> fdata;
fdata.resize(fsize);
Ncv32u dataOffset = 0;
fseek(fp, 0, SEEK_SET);
readCount = fread(&fdata[0], fsize, 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fclose(fp);
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvSize32u);
haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haarStages.resize(haar.NumStages);
haarClassifierNodes.resize(haar.NumClassifierTotalNodes);
haarFeatures.resize(haar.NumFeatures);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages);
dataOffset += szStages;
memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers);
dataOffset += szClassifiers;
memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures);
dataOffset += szFeatures;
return NCV_SUCCESS;
}
NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages,
Ncv32u &numNodes, Ncv32u &numFeatures)
{
size_t readCount;
NCVStatus ncvStat;
cv::String fext = filename.substr(filename.find_last_of(".") + 1);
fext = fext.toLowerCase();
if (fext == "nvbin")
{
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET);
Ncv32u tmp;
readCount = fread(&numStages, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&tmp, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fclose(fp);
}
else if (fext == "xml")
{
HaarClassifierCascadeDescriptor haar;
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
numStages = haar.NumStages;
numNodes = haar.NumClassifierTotalNodes;
numFeatures = haar.NumFeatures;
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
return NCV_SUCCESS;
}
NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
NCVStatus ncvStat;
cv::String fext = filename.substr(filename.find_last_of(".") + 1);
fext = fext.toLowerCase();
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
if (fext == "nvbin")
{
ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else if (fext == "xml")
{
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64));
memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128));
memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64));
return NCV_SUCCESS;
}
NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename,
HaarClassifierCascadeDescriptor haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
Ncv32u dataOffset = 0;
std::vector<unsigned char> fdata;
fdata.resize(szStages+szClassifiers+szFeatures+1024, 0);
//header
*(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION;
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures;
dataOffset += sizeof(Ncv32u);
*(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize;
dataOffset += sizeof(NcvSize32u);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII;
dataOffset += sizeof(NcvBool);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly;
dataOffset += sizeof(NcvBool);
memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages);
dataOffset += szStages;
memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers);
dataOffset += szClassifiers;
memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures);
dataOffset += szFeatures;
Ncv32u fsize = dataOffset;
//TODO: CRC32 here
//update header
dataOffset = sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = fsize;
FILE *fp = fopen(filename.c_str(), "wb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
fwrite(&fdata[0], fsize, 1, fp);
fclose(fp);
return NCV_SUCCESS;
}
| 0ef67c0a19bdd4035e0103bbc1ff5dd89fc12b6b.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
////////////////////////////////////////////////////////////////////////////////
//
// NVIDIA CUDA implementation of Viola-Jones Object Detection Framework
//
// The algorithm and code are explained in the upcoming GPU Computing Gems
// chapter in detail:
//
// Anton Obukhov, "Haar Classifiers for Object Detection with CUDA"
// PDF URL placeholder
// email: [email protected], [email protected]
//
// Credits for help with the code to:
// Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov.
//
////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <cstdio>
#include "opencv2/core/cuda/warp.hpp"
#include "opencv2/core/cuda/warp_shuffle.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_OBJDETECT
# include "opencv2/objdetect.hpp"
# include "opencv2/objdetect/objdetect_c.h"
#endif
#include "opencv2/gpulegacy/NCV.hpp"
#include "opencv2/gpulegacy/NPP_staging.hpp"
#include "opencv2/gpulegacy/NCVHaarObjectDetection.hpp"
#include "NCVRuntimeTemplates.hpp"
#include "NCVAlg.hpp"
//==============================================================================
//
// BlockScan file
//
//==============================================================================
NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE and size is power of 2
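//Example: with idata == 1 in every lane, lane i returns i + 1 (an inclusive prefix sum across the warp)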
__device__ Ncv32u warpScanInclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
#if __CUDA_ARCH__ >= 300
const unsigned int laneId = cv::gpu::cudev::Warp::laneId();
// scan using shuffle functions
#pragma unroll
for (int i = 1; i <= (K_WARP_SIZE / 2); i *= 2)
{
const Ncv32u n = cv::gpu::cudev::shfl_up(idata, i);
if (laneId >= i)
idata += n;
}
return idata;
#else
Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1));
s_Data[pos] = 0;
pos += K_WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
#endif
}
__device__ __forceinline__ Ncv32u warpScanExclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
return warpScanInclusive(idata, s_Data) - idata;
}
template <Ncv32u tiNumScanThreads>
__device__ Ncv32u scan1Inclusive(Ncv32u idata, volatile Ncv32u *s_Data)
{
if (tiNumScanThreads > K_WARP_SIZE)
{
//Bottom-level inclusive warp scan
Ncv32u warpResult = warpScanInclusive(idata, s_Data);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) )
{
s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult;
}
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) )
{
//grab top warp elements
Ncv32u val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE];
}
else
{
return warpScanInclusive(idata, s_Data);
}
}
//==============================================================================
//
// HaarClassifierCascade file
//
//==============================================================================
const Ncv32u MAX_GRID_DIM = 65535;
const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64;
#define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6
#define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2)
/** \internal
* Haar features solid array.
*/
texture<uint2, 1, cudaReadModeElementType> texHaarFeatures;
/** \internal
* Haar classifiers flattened trees container.
* Two parts: first contains root nodes, second - nodes that are referred by root nodes.
* Drawback: breaks tree locality (might cause more cache misses)
* Advantage: No need to introduce additional 32-bit field to index root nodes offsets
*/
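// E.g. with root nodes R0..R2 and non-root nodes N0, N1 the flattened array is
// [R0 R1 R2 N0 N1]; loadFromXML has already rebased child offsets to index this array directly.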
texture<uint4, 1, cudaReadModeElementType> texHaarClassifierNodes;
texture<Ncv32u, 1, cudaReadModeElementType> texIImage;
__device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages)
{
return d_Stages[iStage];
}
template <NcvBool tbCacheTextureCascade>
__device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes)
{
HaarClassifierNode128 tmpNode;
if (tbCacheTextureCascade)
{
tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode);
}
else
{
tmpNode = d_ClassifierNodes[iNode];
}
return tmpNode;
}
template <NcvBool tbCacheTextureCascade>
__device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features,
Ncv32f *weight,
Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight)
{
HaarFeature64 feature;
if (tbCacheTextureCascade)
{
feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature);
}
else
{
feature = d_Features[iFeature];
}
feature.getRect(rectX, rectY, rectWidth, rectHeight);
*weight = feature.getWeight();
}
template <NcvBool tbCacheTextureIImg>
__device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg)
{
if (tbCacheTextureIImg)
{
return tex1Dfetch(texIImage, x);
}
else
{
return d_IImg[x];
}
}
__device__ Ncv32u d_outMaskPosition;
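// Block-wide stream compaction: each thread contributes a pass/fail flag, an intra-block
// inclusive scan assigns dense output slots, the last thread reserves a contiguous range in the
// global output with a single atomicAdd on d_outMaskPosition, and surviving elements are staged
// through shared memory into that range.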
__device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
__shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2];
__shared__ Ncv32u numPassed;
__shared__ Ncv32u outMaskOffset;
Ncv32u incScan = scan1Inclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem);
__syncthreads();
if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1)
{
numPassed = incScan;
outMaskOffset = atomicAdd(&d_outMaskPosition, incScan);
}
if (threadPassFlag)
{
Ncv32u excScan = incScan - threadPassFlag;
shmem[excScan] = threadElem;
}
__syncthreads();
if (threadIdx.x < numPassed)
{
vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x];
}
#endif
}
template <NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u y_offs;
Ncv32u x_offs;
Ncv32u maskOffset;
Ncv32u outMaskVal;
NcvBool bInactiveThread = false;
if (tbReadPixelIndexFromVector)
{
maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (maskOffset >= mask1Dlen)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || (tbDoAtomicCompaction && !bInactiveThread))
{
outMaskVal = d_inMask[maskOffset];
y_offs = outMaskVal >> 16;
x_offs = outMaskVal & 0xFFFF;
}
}
else
{
y_offs = blockIdx.y;
x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
if (x_offs >= mask2Dstride)
{
if (tbDoAtomicCompaction) bInactiveThread = true; else return;
}
if (!tbDoAtomicCompaction || (tbDoAtomicCompaction && !bInactiveThread))
{
maskOffset = y_offs * mask2Dstride + x_offs;
if ((x_offs >= anchorsRoi.width) ||
(!tbInitMaskPositively &&
d_inMask != d_outMask &&
d_inMask[maskOffset] == OBJDET_MASK_ELEMENT_INVALID_32U))
{
if (tbDoAtomicCompaction)
{
bInactiveThread = true;
}
else
{
d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U;
return;
}
}
outMaskVal = (y_offs << 16) | x_offs;
}
}
NcvBool bPass = true;
if (!tbDoAtomicCompaction || tbDoAtomicCompaction) //note: always true
{
Ncv32f pixelStdDev = 0.0f;
if (!bInactiveThread)
pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++)
{
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset();
Ncv32f stageThreshold = curStage.getStageThreshold();
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u iNode = curRootNodeOffset;
if (bPass && !bInactiveThread)
{
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset++;
}
if (curStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
__syncthreads();
if (!tbDoAtomicCompaction)
{
if (!tbReadPixelIndexFromVector ||
(tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask)))
{
d_outMask[maskOffset] = outMaskVal;
}
}
else
{
compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread,
outMaskVal,
d_outMask);
}
}
template <NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction>
__global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea)
{
Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x;
if (maskOffset >= mask1Dlen)
{
return;
}
Ncv32u outMaskVal = d_inMask[maskOffset];
Ncv32u y_offs = outMaskVal >> 16;
Ncv32u x_offs = outMaskVal & 0xFFFF;
Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs];
NcvBool bPass = true;
for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++)
{
//this variable is subject to reduction
Ncv32f curStageSum = 0.0f;
HaarStage64 curStage = getStage(iStage, d_Stages);
Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x;
Ncv32f stageThreshold = curStage.getStageThreshold();
Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2;
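// Each thread of the block walks one classifier tree; the stage's root nodes are consumed in
// chunks of NUM_THREADS_CLASSIFIERPARALLEL, and the per-thread partial sums are combined by the
// subReduce below before the stage threshold test.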
for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++)
{
NcvBool bMoreNodesToTraverse = true;
if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage)
{
Ncv32u iNode = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes);
HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures();
Ncv32u iFeature = featuresDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.0f;
//TODO: fetch into shmem if size suffices. Shmem can be shared with reduce
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
Ncv32f rectWeight;
Ncv32u rectX, rectY, rectWidth, rectHeight;
getFeature<tbCacheTextureCascade>
(iFeature + iRect, d_Features,
&rectWeight, &rectX, &rectY, &rectWidth, &rectHeight);
Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride;
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) +
getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) -
getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg);
#if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY
curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight);
#else
curNodeVal += (Ncv32f)rectSum * rectWeight;
#endif
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = featuresDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
iNode = nextNodeDescriptor.getNextNodeOffset();
}
}
}
__syncthreads();
curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL;
}
Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum);
if (finalStageSum < stageThreshold)
{
bPass = false;
outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
if (!tbDoAtomicCompaction)
{
if (!bPass || d_inMask != d_outMask)
{
if (!threadIdx.x)
{
d_outMask[maskOffset] = outMaskVal;
}
}
}
else
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
if (bPass && !threadIdx.x)
{
Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1);
d_outMask[outMaskOffset] = outMaskVal;
}
#endif
}
}
template <NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction>
__global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
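// Each output slot corresponds to one candidate anchor on a grid decimated by 'step'; surviving
// anchors are stored as packed (y << 16) | x, the format consumed by the classifier kernels above.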
Ncv32u y_offs = blockIdx.y;
Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x;
Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs;
Ncv32u y_offs_upsc = step * y_offs;
Ncv32u x_offs_upsc = step * x_offs;
Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc;
Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U;
if (x_offs_upsc < anchorsRoi.width &&
(!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U))
{
outElem = (y_offs_upsc << 16) | x_offs_upsc;
}
if (!tbDoAtomicCompaction)
{
d_outMask[outMaskOffset] = outElem;
}
else
{
compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U,
outElem,
d_outMask);
}
}
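//The functors below capture kernel arguments and launch configuration so that
//NCVRuntimeTemplateBool::KernelCaller can turn run-time boolean flags into the matching
//compile-time template specialization of each kernel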
struct applyHaarClassifierAnchorParallelFunctor
{
dim3 gridConf, blockConf;
cudaStream_t cuStream;
//Kernel arguments are stored as members
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
applyHaarClassifierAnchorParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value,
Loki::TL::TypeAt<TList, 3>::Result::value,
Loki::TL::TypeAt<TList, 4>::Result::value >
<<<gridConf, blockConf, 0, cuStream>>>
(d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively,
NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbReadPixelIndexFromVector,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, cudaStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor>
::call( &functor,
tbInitMaskPositively,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbReadPixelIndexFromVector,
tbDoAtomicCompaction);
}
struct applyHaarClassifierClassifierParallelFunctor
{
dim3 gridConf, blockConf;
cudaStream_t cuStream;
//Kernel arguments are stored as members
Ncv32u *d_IImg;
Ncv32u IImgStride;
Ncv32f *d_weights;
Ncv32u weightsStride;
HaarFeature64 *d_Features;
HaarClassifierNode128 *d_ClassifierNodes;
HaarStage64 *d_Stages;
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u startStageInc;
Ncv32u endStageExc;
Ncv32f scaleArea;
//Arguments are passed through the constructor
applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream,
Ncv32u *_d_IImg, Ncv32u _IImgStride,
Ncv32f *_d_weights, Ncv32u _weightsStride,
HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _startStageInc,
Ncv32u _endStageExc, Ncv32f _scaleArea) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_IImg(_d_IImg),
IImgStride(_IImgStride),
d_weights(_d_weights),
weightsStride(_weightsStride),
d_Features(_d_Features),
d_ClassifierNodes(_d_ClassifierNodes),
d_Stages(_d_Stages),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
startStageInc(_startStageInc),
endStageExc(_endStageExc),
scaleArea(_scaleArea)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
applyHaarClassifierClassifierParallel <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value,
Loki::TL::TypeAt<TList, 2>::Result::value >
<<<gridConf, blockConf, 0, cuStream>>>
(d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
}
};
void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg,
NcvBool tbCacheTextureCascade,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, cudaStream_t cuStream,
Ncv32u *d_IImg, Ncv32u IImgStride,
Ncv32f *d_weights, Ncv32u weightsStride,
HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u startStageInc,
Ncv32u endStageExc, Ncv32f scaleArea)
{
applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream,
d_IImg, IImgStride,
d_weights, weightsStride,
d_Features, d_ClassifierNodes, d_Stages,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, startStageInc,
endStageExc, scaleArea);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor>
::call( &functor,
tbCacheTextureIImg,
tbCacheTextureCascade,
tbDoAtomicCompaction);
}
struct initializeMaskVectorFunctor
{
dim3 gridConf, blockConf;
cudaStream_t cuStream;
//Kernel arguments are stored as members
Ncv32u *d_inMask;
Ncv32u *d_outMask;
Ncv32u mask1Dlen;
Ncv32u mask2Dstride;
NcvSize32u anchorsRoi;
Ncv32u step;
//Arguments are passed through the constructor
initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream,
Ncv32u *_d_inMask, Ncv32u *_d_outMask,
Ncv32u _mask1Dlen, Ncv32u _mask2Dstride,
NcvSize32u _anchorsRoi, Ncv32u _step) :
gridConf(_gridConf),
blockConf(_blockConf),
cuStream(_cuStream),
d_inMask(_d_inMask),
d_outMask(_d_outMask),
mask1Dlen(_mask1Dlen),
mask2Dstride(_mask2Dstride),
anchorsRoi(_anchorsRoi),
step(_step)
{}
template<class TList>
void call(TList tl)
{
(void)tl;
initializeMaskVector <
Loki::TL::TypeAt<TList, 0>::Result::value,
Loki::TL::TypeAt<TList, 1>::Result::value >
<<<gridConf, blockConf, 0, cuStream>>>
(d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
}
};
void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask,
NcvBool tbDoAtomicCompaction,
dim3 gridConf, dim3 blockConf, cudaStream_t cuStream,
Ncv32u *d_inMask, Ncv32u *d_outMask,
Ncv32u mask1Dlen, Ncv32u mask2Dstride,
NcvSize32u anchorsRoi, Ncv32u step)
{
initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream,
d_inMask, d_outMask,
mask1Dlen, mask2Dstride,
anchorsRoi, step);
//Second parameter is the number of "dynamic" template parameters
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor>
::call( &functor,
tbMaskByInmask,
tbDoAtomicCompaction);
}
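//returns the index of the first stage with at least N classifier root nodes; used to pick
//the switch point between the anchor-parallel and the classifier-parallel kernels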
Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages)
{
Ncv32u i = 0;
for (; i<haar.NumStages; i++)
{
if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N)
{
break;
}
}
return i;
}
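//Applies the Haar cascade to all anchors of one scale in three phases:
//1) initialize the pixel mask (only when pixelStep != 1 or an input mask is used),
//2) run the first stages with one thread per anchor, compacting surviving anchors
//   every few stages,
//3) run the remaining heavy stages with one thread block per surviving anchor
//   (classifier-parallel)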
NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral,
NCVMatrix<Ncv32f> &d_weights,
NCVMatrixAlloc<Ncv32u> &d_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea,
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
cudaDeviceProp &devProp,
cudaStream_t cuStream)
{
ncvAssertReturn(integral.memType() == d_weights.memType()&&
integral.memType() == d_pixelMask.memType() &&
integral.memType() == gpuAllocator.memType() &&
(integral.memType() == NCVMemoryTypeDevice ||
integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height &&
d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height &&
integral.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
#if defined _SELF_TEST_
NCVStatus ncvStat;
NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width(), integral.height(), integral.pitch());
ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width(), d_weights.height(), d_weights.pitch());
ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width(), d_pixelMask.height(), d_pixelMask.pitch());
ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, static_cast<Ncv32u>(d_HaarNodes.length()));
ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, static_cast<Ncv32u>(d_HaarFeatures.length()));
ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width(), d_pixelMask.height(), d_pixelMask.pitch());
ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
ncvStat = d_pixelMask.copySolid(h_pixelMask, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = integral.copySolid(h_integralImage, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_weights.copySolid(h_weights, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<d_pixelMask.stride(); j++)
{
if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width))
{
if (!bMaskElements || h_pixelMask.ptr()[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U)
{
h_pixelMask.ptr()[i*d_pixelMask.stride()+j] = (i << 16) | j;
}
}
else
{
h_pixelMask.ptr()[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
}
}
NCV_SKIP_COND_END
#endif
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride());
ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE);
NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length()));
ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2);
ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
Ncv32u *hp_zero = &hp_pool32u.ptr()[0];
Ncv32u *hp_numDet = &hp_pool32u.ptr()[1];
NCV_SKIP_COND_BEGIN
*hp_zero = 0;
*hp_numDet = 0;
NCV_SKIP_COND_END
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
NcvBool bTexCacheCascade = devProp.major < 2;
NcvBool bTexCacheIImg = true; //this works better even on Fermi so far
NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3);
NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask;
NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp;
Ncv32u szNppCompactTmpBuf;
nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp);
if (bDoAtomicCompaction)
{
szNppCompactTmpBuf = 0;
}
NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf);
NCV_SKIP_COND_BEGIN
if (bTexCacheIImg)
{
cudaChannelFormatDesc cfdTexIImage;
cfdTexIImage = cudaCreateChannelDesc<Ncv32u>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage,
(anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
if (bTexCacheCascade)
{
cudaChannelFormatDesc cfdTexHaarFeatures;
cudaChannelFormatDesc cfdTexHaarClassifierNodes;
cfdTexHaarFeatures = cudaCreateChannelDesc<uint2>();
cfdTexHaarClassifierNodes = cudaCreateChannelDesc<uint4>();
size_t alignmentOffset;
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarFeatures,
d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarClassifierNodes,
d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR);
ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR);
}
Ncv32u stageStartAnchorParallel = 0;
Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL,
haar, h_HaarStages);
Ncv32u stageEndClassifierParallel = haar.NumStages;
if (stageMiddleSwitch == 0)
{
stageMiddleSwitch = 1;
}
//create stages subdivision for pixel-parallel processing
const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 7 : 1;
Ncv32u curStop = stageStartAnchorParallel;
std::vector<Ncv32u> pixParallelStageStops;
while (curStop < stageMiddleSwitch)
{
pixParallelStageStops.push_back(curStop);
curStop += compactEveryNstage;
}
if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2)
{
pixParallelStageStops[pixParallelStageStops.size()-1] =
(stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2;
}
pixParallelStageStops.push_back(stageMiddleSwitch);
Ncv32u pixParallelStageStopsIndex = 0;
if (pixelStep != 1 || bMaskElements)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
(anchorsRoi.height + pixelStep - 1) / pixelStep);
dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL);
if (gridInit.x == 0 || gridInit.y == 0)
{
numDetections = 0;
return NCV_SUCCESS;
}
initializeMaskVectorDynTemplate(bMaskElements,
bDoAtomicCompaction,
gridInit, blockInit, cuStream,
d_ptrNowData->ptr(),
d_ptrNowTmp->ptr(),
static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(),
anchorsRoi, pixelStep);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
swap(d_ptrNowData, d_ptrNowTmp);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR);
}
numDetections = *hp_numDet;
}
else
{
//
// 1. Run the first pixel-input pixel-parallel classifier for few stages
//
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL),
anchorsRoi.height);
dim3 block1(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
true, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid1,
block1,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
0,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()),
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
pixParallelStageStopsIndex++;
}
//
// 2. Run pixel-parallel stages
//
for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++)
{
if (numDetections == 0)
{
break;
}
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL);
if (numDetections > MAX_GRID_DIM)
{
grid2.x = MAX_GRID_DIM;
grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block2(NUM_THREADS_ANCHORSPARALLEL);
applyHaarClassifierAnchorParallelDynTemplate(
false, //tbInitMaskPositively
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector
bDoAtomicCompaction, //tbDoAtomicCompaction
grid2,
block2,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
pixParallelStageStops[pixParallelStageStopsIndex],
pixParallelStageStops[pixParallelStageStopsIndex+1],
scaleAreaPixels);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
//
// 3. Run all left stages in one stage-parallel kernel
//
if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel)
{
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u),
0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
dim3 grid3(numDetections);
if (numDetections > MAX_GRID_DIM)
{
grid3.x = MAX_GRID_DIM;
grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM;
}
dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL);
applyHaarClassifierClassifierParallelDynTemplate(
bTexCacheIImg, //tbCacheTextureIImg
bTexCacheCascade, //tbCacheTextureCascade
bDoAtomicCompaction, //tbDoAtomicCompaction
grid3,
block3,
cuStream,
integral.ptr(), integral.stride(),
d_weights.ptr(), d_weights.stride(),
d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(),
d_ptrNowData->ptr(),
bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(),
numDetections,
d_pixelMask.stride(),
anchorsRoi,
stageMiddleSwitch,
stageEndClassifierParallel,
scaleAreaPixels);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u),
0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
else
{
NCVStatus nppSt;
nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections,
d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U,
d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp);
ncvAssertReturnNcvStat(nppSt);
}
swap(d_ptrNowData, d_ptrNowTmp);
numDetections = *hp_numDet;
}
if (d_ptrNowData != &d_vecPixelMask)
{
d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
#if defined _SELF_TEST_
ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
if (bDoAtomicCompaction)
{
std::sort(h_pixelMask_d.ptr(), h_pixelMask_d.ptr() + numDetections);
}
Ncv32u fpu_oldcw, fpu_cw;
_controlfp_s(&fpu_cw, 0, 0);
fpu_oldcw = fpu_cw;
_controlfp_s(&fpu_cw, _PC_24, _MCW_PC);
Ncv32u numDetGold;
ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar,
h_HaarStages, h_HaarNodes, h_HaarFeatures,
bMaskElements, anchorsRoi, pixelStep, scaleArea);
ncvAssertReturnNcvStat(ncvStat);
_controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);
bool bPass = true;
if (numDetGold != numDetections)
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections);
bPass = false;
}
else
{
for (Ncv32u i=0; i<std::max(numDetGold, numDetections) && bPass; i++)
{
if (h_pixelMask.ptr()[i] != h_pixelMask_d.ptr()[i])
{
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr()[i], h_pixelMask_d.ptr()[i]);
bPass = false;
}
}
}
printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED");
#endif
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
//==============================================================================
//
// HypothesesOperations file
//
//==============================================================================
const Ncv32u NUM_GROW_THREADS = 128;
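//decodes a packed (y << 16 | x) pixel mask element into a detection rectangle
//scaled back to the original image resolution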
__device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, Ncv32u height, Ncv32f scale)
{
NcvRect32u res;
res.x = (Ncv32u)(scale * (pixel & 0xFFFF));
res.y = (Ncv32u)(scale * (pixel >> 16));
res.width = (Ncv32u)(scale * width);
res.height = (Ncv32u)(scale * height);
return res;
}
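//one thread per surviving pixel mask element: convert it into a hypothesis rectangle
//and append it after the detections accumulated so far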
__global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements,
NcvRect32u *hypotheses,
Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x;
if (elemAddr >= numElements)
{
return;
}
hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale);
}
NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale,
cudaStream_t cuStream)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
dim3 block(NUM_GROW_THREADS);
dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
growDetectionsKernel<<<grid, block, 0, cuStream>>>(pixelMask.ptr(), numDetsToCopy,
hypotheses.ptr() + totalDetections,
rectWidth, rectHeight, curScale);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
totalDetections += numDetsToCopy;
return ncvStat;
}
//==============================================================================
//
// Pipeline file
//
//==============================================================================
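//Full multi-scale detection pipeline: build the integral and squared-integral images once,
//then for every scale decimate them, compute the per-anchor standard deviation, run the
//cascade, and grow surviving mask elements into rectangles; finally group rectangles on
//the host (or keep only the largest object when NCVPipeObjDet_FindLargestObject is set)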
NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
NcvSize32u srcRoi,
NCVVector<NcvRect32u> &d_dstRects,
Ncv32u &dstNumRects,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarStage64> &d_HaarStages,
NCVVector<HaarClassifierNode128> &d_HaarNodes,
NCVVector<HaarFeature64> &d_HaarFeatures,
NcvSize32u minObjSize,
Ncv32u minNeighbors, //default 4
Ncv32f scaleStep, //default 1.2f
Ncv32u pixelStep, //default 1
Ncv32u flags, //default NCVPipeObjDet_Default
INCVMemAllocator &gpuAllocator,
INCVMemAllocator &cpuAllocator,
cudaDeviceProp &devProp,
cudaStream_t cuStream)
{
ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() &&
d_srcImg.memType() == gpuAllocator.memType() &&
(d_srcImg.memType() == NCVMemoryTypeDevice ||
d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() &&
d_HaarStages.memType() == d_HaarFeatures.memType() &&
(d_HaarStages.memType() == NCVMemoryTypeDevice ||
d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED);
ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL &&
h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL &&
d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR);
ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 &&
d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height &&
srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height &&
d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE);
ncvAssertReturn(d_HaarStages.length() >= haar.NumStages &&
d_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
d_HaarFeatures.length() >= haar.NumFeatures &&
d_HaarStages.length() == h_HaarStages.length() &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
//TODO: set NPP active stream to cuStream
NCVStatus ncvStat;
NCV_SET_SKIP_COND(gpuAllocator.isCounting());
Ncv32u integralWidth = d_srcImg.width() + 1;
Ncv32u integralHeight = d_srcImg.height() + 1;
NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height());
ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight);
ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height());
ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCVStatus nppStat;
Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;
nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, std::max(szTmpBufIntegral, szTmpBufSqIntegral));
ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);
NCV_SKIP_COND_BEGIN
nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
integral.ptr(), integral.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(),
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
NcvSize32u(d_srcImg.width(), d_srcImg.height()),
d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
dstNumRects = 0;
Ncv32u lastCheckedScale = 0;
NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0);
std::vector<Ncv32u> scalesVector;
NcvBool bFoundLargestFace = false;
for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep)
{
Ncv32u scale = (Ncv32u)scaleIter;
if (lastCheckedScale == scale)
{
continue;
}
lastCheckedScale = scale;
if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width ||
haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height)
{
continue;
}
NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRo_i.width = srcRoi_.width + 1;
srcIIRo_i.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRo_i.width / scale;
scaledIIRoi.height = srcIIRo_i.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
if (searchRoi.width <= 0 || searchRoi.height <= 0)
{
break;
}
scalesVector.push_back(scale);
if (gpuAllocator.isCounting())
{
break;
}
}
if (bReverseTraverseScale)
{
std::reverse(scalesVector.begin(), scalesVector.end());
}
//TODO: handle _fair_scale_ flag
for (Ncv32u i=0; i<scalesVector.size(); i++)
{
Ncv32u scale = scalesVector[i];
NcvSize32u srcRoi_, scaledIIRoi, searchRoi;
NcvSize32u srcIIRoi;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRoi.width = srcRoi_.width + 1;
srcIIRoi.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
NCV_SKIP_COND_BEGIN
nppStat = nppiStDecimate_32u_C1R(
integral.ptr(), integral.pitch(),
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
nppStat = nppiStDecimate_64u_C1R(
d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
srcIIRoi, scale, true);
ncvAssertReturnNcvStat(nppStat);
const NcvRect32u rect(
HAAR_STDDEV_BORDER,
HAAR_STDDEV_BORDER,
haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,
haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);
nppStat = nppiStRectStdDev_32f_C1R(
d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(),
d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(),
d_rectStdDev.ptr(), d_rectStdDev.pitch(),
NcvSize32u(searchRoi.width, searchRoi.height), rect,
(Ncv32f)scale*scale, true);
ncvAssertReturnNcvStat(nppStat);
NCV_SKIP_COND_END
Ncv32u detectionsOnThisScale;
ncvStat = ncvApplyHaarClassifierCascade_device(
d_scaledIntegralImage, d_rectStdDev, d_pixelMask,
detectionsOnThisScale,
haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,
searchRoi, pixelStep, (Ncv32f)scale*scale,
gpuAllocator, cpuAllocator, devProp, cuStream);
ncvAssertReturnNcvStat(ncvStat);
NCV_SKIP_COND_BEGIN
NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment());
ncvStat = ncvGrowDetectionsVector_device(
d_vecPixelMask,
detectionsOnThisScale,
d_hypothesesIntermediate,
dstNumRects,
static_cast<Ncv32u>(d_hypothesesIntermediate.length()),
haar.ClassifierSize.width,
haar.ClassifierSize.height,
(Ncv32f)scale,
cuStream);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (dstNumRects == 0)
{
continue;
}
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
Ncv32u numStrongHypothesesNow = dstNumRects;
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
numStrongHypothesesNow,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (numStrongHypothesesNow > 0)
{
NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0];
for (Ncv32u j=1; j<numStrongHypothesesNow; j++)
{
if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width)
{
maxRect = h_hypothesesIntermediate.ptr()[j];
}
}
h_hypothesesIntermediate.ptr()[0] = maxRect;
dstNumRects = 1;
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
bFoundLargestFace = true;
break;
}
}
NCV_SKIP_COND_END
if (gpuAllocator.isCounting())
{
break;
}
}
NCVStatus ncvRetCode = NCV_SUCCESS;
NCV_SKIP_COND_BEGIN
if (flags & NCVPipeObjDet_FindLargestObject)
{
if (!bFoundLargestFace)
{
dstNumRects = 0;
}
}
else
{
//TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left)
if (dstNumRects != 0)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
}
ncvStat = ncvGroupRectangles_host(
h_hypothesesIntermediate,
dstNumRects,
minNeighbors,
RECT_SIMILARITY_PROPORTION,
NULL);
ncvAssertReturnNcvStat(ncvStat);
if (dstNumRects > d_dstRects.length())
{
ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
dstNumRects = static_cast<Ncv32u>(d_dstRects.length());
}
if (dstNumRects != 0)
{
ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream,
dstNumRects * sizeof(NcvRect32u));
ncvAssertReturnNcvStat(ncvStat);
}
}
if (flags & NCVPipeObjDet_VisualizeInPlace)
{
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(),
d_srcImg.width(), d_srcImg.height(),
d_dstRects.ptr(), dstNumRects, 255, cuStream);
}
NCV_SKIP_COND_END
return ncvRetCode;
}
//==============================================================================
//
// Purely Host code: classifier IO, mock-ups
//
//==============================================================================
#ifdef _SELF_TEST_
#include <float.h>
#endif
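//reference CPU implementation of the cascade; mirrors the device path and is used by the
//_SELF_TEST_ comparison in ncvApplyHaarClassifierCascade_device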
NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage,
NCVMatrix<Ncv32f> &h_weights,
NCVMatrixAlloc<Ncv32u> &h_pixelMask,
Ncv32u &numDetections,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures,
NcvBool bMaskElements,
NcvSize32u anchorsRoi,
Ncv32u pixelStep,
Ncv32f scaleArea)
{
ncvAssertReturn(h_integralImage.memType() == h_weights.memType() &&
h_integralImage.memType() == h_pixelMask.memType() &&
(h_integralImage.memType() == NCVMemoryTypeHostPageable ||
h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() &&
h_HaarStages.memType() == h_HaarFeatures.memType() &&
(h_HaarStages.memType() == NCVMemoryTypeHostPageable ||
h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL &&
h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 &&
h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height &&
h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height &&
h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width &&
h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE);
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages &&
h_HaarNodes.length() >= haar.NumClassifierTotalNodes &&
h_HaarFeatures.length() >= haar.NumFeatures &&
haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES);
ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP);
Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) *
(haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER));
for (Ncv32u i=0; i<anchorsRoi.height; i++)
{
for (Ncv32u j=0; j<h_pixelMask.stride(); j++)
{
if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width)
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
}
else
{
for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++)
{
Ncv32f curStageSum = 0.0f;
Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes();
Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset();
if (iStage == 0)
{
if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
else
{
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j);
}
}
else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
while (numRootNodesInStage--)
{
NcvBool bMoreNodesToTraverse = true;
Ncv32u curNodeOffset = curRootNodeOffset;
while (bMoreNodesToTraverse)
{
HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset];
HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc();
Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures();
Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset();
Ncv32f curNodeVal = 0.f;
for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++)
{
HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect];
Ncv32u rectX, rectY, rectWidth, rectHeight;
feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight);
Ncv32f rectWeight = feature.getWeight();
Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX);
Ncv32u iioffsTR = iioffsTL + rectWidth;
Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride();
Ncv32u iioffsBR = iioffsBL + rectWidth;
Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL];
Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR];
Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL];
Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR];
Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR;
curNodeVal += (Ncv32f)rectSum * rectWeight;
}
HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc();
HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc();
Ncv32f nodeThreshold = curNode.getThreshold();
HaarClassifierNodeDescriptor32 nextNodeDescriptor;
NcvBool nextNodeIsLeaf;
if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold)
{
nextNodeDescriptor = nodeLeft;
nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf();
}
else
{
nextNodeDescriptor = nodeRight;
nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf();
}
if (nextNodeIsLeaf)
{
Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost();
curStageSum += tmpLeafValue;
bMoreNodesToTraverse = false;
}
else
{
curNodeOffset = nextNodeDescriptor.getNextNodeOffset();
}
}
curRootNodeOffset++;
}
Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold();
if (curStageSum < tmpStageThreshold)
{
//drop
h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U;
break;
}
}
}
}
}
std::sort(h_pixelMask.ptr(), h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride());
Ncv32u i = 0;
for (; i<anchorsRoi.height * h_pixelMask.stride(); i++)
{
if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U)
{
break;
}
}
numDetections = i;
return NCV_SUCCESS;
}
NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask,
Ncv32u numPixelMaskDetections,
NCVVector<NcvRect32u> &hypotheses,
Ncv32u &totalDetections,
Ncv32u totalMaxDetections,
Ncv32u rectWidth,
Ncv32u rectHeight,
Ncv32f curScale)
{
ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR);
ncvAssertReturn(pixelMask.memType() == hypotheses.memType() &&
pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI);
ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE);
ncvAssertReturn(totalMaxDetections <= hypotheses.length() &&
numPixelMaskDetections <= pixelMask.length() &&
totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT);
NCVStatus ncvStat = NCV_SUCCESS;
Ncv32u numDetsToCopy = numPixelMaskDetections;
if (numDetsToCopy == 0)
{
return ncvStat;
}
if (totalDetections + numPixelMaskDetections > totalMaxDetections)
{
ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW;
numDetsToCopy = totalMaxDetections - totalDetections;
}
for (Ncv32u i=0; i<numDetsToCopy; i++)
{
hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale);
}
totalDetections += numDetsToCopy;
return ncvStat;
}
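//converts a legacy OpenCV CvHaarClassifierCascade (loaded from XML) into the flattened
//NCV representation: stages, classifier nodes (root nodes first, the remaining tree nodes
//appended afterwards with fixed-up offsets) and rectangle features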
static NCVStatus loadFromXML(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
#ifndef HAVE_OPENCV_OBJDETECT
(void) filename;
(void) haar;
(void) haarStages;
(void) haarClassifierNodes;
(void) haarFeatures;
CV_Error(cv::Error::StsNotImplemented, "This functionality requires objdetect module");
return NCV_HAAR_XML_LOADING_EXCEPTION;
#else
NCVStatus ncvStat;
haar.NumStages = 0;
haar.NumClassifierRootNodes = 0;
haar.NumClassifierTotalNodes = 0;
haar.NumFeatures = 0;
haar.ClassifierSize.width = 0;
haar.ClassifierSize.height = 0;
haar.bHasStumpsOnly = true;
haar.bNeedsTiltedII = false;
Ncv32u curMaxTreeDepth = 0;
std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes;
haarStages.resize(0);
haarClassifierNodes.resize(0);
haarFeatures.resize(0);
cv::Ptr<CvHaarClassifierCascade> oldCascade = (CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0);
if (oldCascade.empty())
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
haar.ClassifierSize.width = oldCascade->orig_window_size.width;
haar.ClassifierSize.height = oldCascade->orig_window_size.height;
int stagesCount = oldCascade->count;
for(int s = 0; s < stagesCount; ++s) // by stages
{
HaarStage64 curStage;
curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));
curStage.setStageThreshold(oldCascade->stage_classifier[s].threshold);
int treesCount = oldCascade->stage_classifier[s].count;
for(int t = 0; t < treesCount; ++t) // by trees
{
Ncv32u nodeId = 0;
CvHaarClassifier* tree = &oldCascade->stage_classifier[s].classifier[t];
int nodesCount = tree->count;
for(int n = 0; n < nodesCount; ++n) //by features
{
CvHaarFeature* feature = &tree->haar_feature[n];
HaarClassifierNode128 curNode;
curNode.setThreshold(tree->threshold[n]);
NcvBool bIsLeftNodeLeaf = false;
NcvBool bIsRightNodeLeaf = false;
HaarClassifierNodeDescriptor32 nodeLeft;
if ( tree->left[n] <= 0 )
{
Ncv32f leftVal = tree->alpha[-tree->left[n]];
ncvStat = nodeLeft.create(leftVal);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
bIsLeftNodeLeaf = true;
}
else
{
Ncv32u leftNodeOffset = tree->left[n];
nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1));
haar.bHasStumpsOnly = false;
}
curNode.setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight;
if ( tree->right[n] <= 0 )
{
Ncv32f rightVal = tree->alpha[-tree->right[n]];
ncvStat = nodeRight.create(rightVal);
ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat);
bIsRightNodeLeaf = true;
}
else
{
Ncv32u rightNodeOffset = tree->right[n];
nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1));
haar.bHasStumpsOnly = false;
}
curNode.setRightNodeDesc(nodeRight);
Ncv32u tiltedVal = feature->tilted;
haar.bNeedsTiltedII = (tiltedVal != 0);
Ncv32u featureId = 0;
for(int l = 0; l < CV_HAAR_FEATURE_MAX; ++l) //by rects
{
Ncv32u rectX = feature->rect[l].r.x;
Ncv32u rectY = feature->rect[l].r.y;
Ncv32u rectWidth = feature->rect[l].r.width;
Ncv32u rectHeight = feature->rect[l].r.height;
Ncv32f rectWeight = feature->rect[l].weight;
if (rectWeight == 0/* && rectX == 0 &&rectY == 0 && rectWidth == 0 && rectHeight == 0*/)
break;
HaarFeature64 curFeature;
ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height);
curFeature.setWeight(rectWeight);
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
haarFeatures.push_back(curFeature);
featureId++;
}
HaarFeatureDescriptor32 tmpFeatureDesc;
ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf,
featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId);
ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat);
curNode.setFeatureDesc(tmpFeatureDesc);
if (!nodeId)
{
//root node
haarClassifierNodes.push_back(curNode);
curMaxTreeDepth = 1;
}
else
{
//other node
h_TmpClassifierNotRootNodes.push_back(curNode);
curMaxTreeDepth++;
}
nodeId++;
}
}
curStage.setNumClassifierRootNodes(treesCount);
haarStages.push_back(curStage);
}
//fill in cascade stats
haar.NumStages = static_cast<Ncv32u>(haarStages.size());
haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size());
haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size());
haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size());
//merge root and leaf nodes in one classifiers array
Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size());
for (Ncv32u i=0; i<haarClassifierNodes.size(); i++)
{
HaarFeatureDescriptor32 featureDesc = haarClassifierNodes[i].getFeatureDesc();
HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc();
if (!featureDesc.isLeftNodeLeaf())
{
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
nodeLeft.create(newOffset);
}
haarClassifierNodes[i].setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc();
if (!featureDesc.isRightNodeLeaf())
{
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
nodeRight.create(newOffset);
}
haarClassifierNodes[i].setRightNodeDesc(nodeRight);
}
for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++)
{
HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc();
HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc();
if (!featureDesc.isLeftNodeLeaf())
{
Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot;
nodeLeft.create(newOffset);
}
h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft);
HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc();
if (!featureDesc.isRightNodeLeaf())
{
Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot;
nodeRight.create(newOffset);
}
h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight);
haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]);
}
return NCV_SUCCESS;
#endif
}
#define NVBIN_HAAR_SIZERESERVED 16
#define NVBIN_HAAR_VERSION 0x1
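//NVBIN layout: version (Ncv32u), total file size (Ncv32u), padding up to
//NVBIN_HAAR_SIZERESERVED bytes, then the cascade descriptor fields followed by the
//HaarStage64, HaarClassifierNode128 and HaarFeature64 arrays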
static NCVStatus loadFromNVBIN(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
std::vector<HaarStage64> &haarStages,
std::vector<HaarClassifierNode128> &haarClassifierNodes,
std::vector<HaarFeature64> &haarFeatures)
{
size_t readCount;
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
Ncv32u fsize;
readCount = fread(&fsize, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fseek(fp, 0, SEEK_END);
Ncv32u fsizeActual = ftell(fp);
ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR);
std::vector<unsigned char> fdata;
fdata.resize(fsize);
Ncv32u dataOffset = 0;
fseek(fp, 0, SEEK_SET);
readCount = fread(&fdata[0], fsize, 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fclose(fp);
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(Ncv32u);
haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvSize32u);
haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset);
dataOffset += sizeof(NcvBool);
haarStages.resize(haar.NumStages);
haarClassifierNodes.resize(haar.NumClassifierTotalNodes);
haarFeatures.resize(haar.NumFeatures);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages);
dataOffset += szStages;
memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, szClassifiers);
dataOffset += szClassifiers;
memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures);
dataOffset += szFeatures;
return NCV_SUCCESS;
}
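//reports the stage/node/feature counts of an .nvbin or .xml cascade so the caller can
//size buffers before loading the full cascade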
NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages,
Ncv32u &numNodes, Ncv32u &numFeatures)
{
size_t readCount;
NCVStatus ncvStat;
cv::String fext = filename.substr(filename.find_last_of(".") + 1);
fext = fext.toLowerCase();
if (fext == "nvbin")
{
FILE *fp = fopen(filename.c_str(), "rb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
Ncv32u fileVersion;
readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR);
fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET);
Ncv32u tmp;
readCount = fread(&numStages, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&tmp, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp);
ncvAssertReturn(1 == readCount, NCV_FILE_ERROR);
fclose(fp);
}
else if (fext == "xml")
{
HaarClassifierCascadeDescriptor haar;
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
numStages = haar.NumStages;
numNodes = haar.NumClassifierTotalNodes;
numFeatures = haar.NumFeatures;
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
return NCV_SUCCESS;
}
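//loads a cascade from .nvbin or .xml into the supplied pinned host vectors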
NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename,
HaarClassifierCascadeDescriptor &haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
NCVStatus ncvStat;
cv::String fext = filename.substr(filename.find_last_of(".") + 1);
fext = fext.toLowerCase();
std::vector<HaarStage64> haarStages;
std::vector<HaarClassifierNode128> haarNodes;
std::vector<HaarFeature64> haarFeatures;
if (fext == "nvbin")
{
ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else if (fext == "xml")
{
ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures);
ncvAssertReturnNcvStat(ncvStat);
}
else
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY);
memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64));
memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128));
memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64));
return NCV_SUCCESS;
}
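// Serializes a cascade held in pinned host memory into the NVBIN layout (version, total size, counts, classifier window size, flags, then the raw stage/node/feature arrays) and writes it to the given file.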
NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename,
HaarClassifierCascadeDescriptor haar,
NCVVector<HaarStage64> &h_HaarStages,
NCVVector<HaarClassifierNode128> &h_HaarNodes,
NCVVector<HaarFeature64> &h_HaarFeatures)
{
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT);
ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned &&
h_HaarNodes.memType() == NCVMemoryTypeHostPinned &&
h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
Ncv32u szStages = haar.NumStages * sizeof(HaarStage64);
Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128);
Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64);
Ncv32u dataOffset = 0;
std::vector<unsigned char> fdata;
fdata.resize(szStages+szClassifiers+szFeatures+1024, 0);
//header
*(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION;
//data
dataOffset = NVBIN_HAAR_SIZERESERVED;
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes;
dataOffset += sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures;
dataOffset += sizeof(Ncv32u);
*(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize;
dataOffset += sizeof(NcvSize32u);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII;
dataOffset += sizeof(NcvBool);
*(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly;
dataOffset += sizeof(NcvBool);
memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages);
dataOffset += szStages;
memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers);
dataOffset += szClassifiers;
memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures);
dataOffset += szFeatures;
Ncv32u fsize = dataOffset;
//TODO: CRC32 here
//update header
dataOffset = sizeof(Ncv32u);
*(Ncv32u *)(&fdata[0]+dataOffset) = fsize;
FILE *fp = fopen(filename.c_str(), "wb");
ncvAssertReturn(fp != NULL, NCV_FILE_ERROR);
fwrite(&fdata[0], fsize, 1, fp);
fclose(fp);
return NCV_SUCCESS;
}
|
100cb89d919d80f9acdcbb824663aa2c6e2c4640.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helpers.h"
namespace cuda {
thread_local CublasHandle g_cuhandle;
namespace helpers {
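// Grid-stride fill: each thread writes `val` to elements idx, idx + gridDim.x*blockDim.x, ..., so the kernel is correct for any grid size.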
__global__ void cuFill(float* array, size_t size, float val) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = idx; i < size; i += gridDim.x * blockDim.x) {
array[i] = val;
}
}
void fill(Ptr<float> array, size_t size, float val) {
hipLaunchKernelGGL(( cuFill), dim3((size + 512 - 1) / 512), dim3(512), 0, 0, array.Get(), size, val);
}
} // helpers
} // cuda
| 100cb89d919d80f9acdcbb824663aa2c6e2c4640.cu | #include "helpers.h"
namespace cuda {
thread_local CublasHandle g_cuhandle;
namespace helpers {
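// Grid-stride fill: each thread writes `val` to elements idx, idx + gridDim.x*blockDim.x, ..., so the kernel is correct for any grid size.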
__global__ void cuFill(float* array, size_t size, float val) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = idx; i < size; i += gridDim.x * blockDim.x) {
array[i] = val;
}
}
void fill(Ptr<float> array, size_t size, float val) {
cuFill<<<(size + 512 - 1) / 512, 512>>>(array.Get(), size, val);
}
} // helpers
} // cuda
|
0db2b3a7ac30a9c095b7f2bf4a4e84a80aee6781.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BlockFilter.h"
#include <cstdio>
#include "constants.h"
#include "CUDAUtils.h"
#define BLK_DEPTH 1
#define BLK_HEIGHT 32
#define BLK_WIDTH BLK_HEIGHT
texture<float, hipTextureType1D> t_dataB;
texture<float4, hipTextureType1D> t_dataB4;
__constant__ float4 c_AFP[BLK_WIDTH];
__constant__ float4 c_AbF;
__constant__ float4 c_AbR;
__constant__ float4 c_HARB_AFP;
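// Block-boundary filter terms in constant memory: AFP (forward response to the prologue), the propagation factors AbF/AbR for the causal/anticausal passes, and H(ARB)*AFP; the naming appears to follow the block-parallel recursive filtering scheme of Nehab et al.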
#ifdef __CUDA_ARCH__
__host__ __device__ __forceinline__
#else
inline
#endif
float4
filterHorOperation4(
const float4 &val0, const float4 &val1, const float a)
{
float4 res = val0;
res.x += a * (val1.x - val0.x);
res.y += a * (val1.y - val0.y);
res.z += a * (val1.z - val0.z);
res.w += a * (val1.w - val0.w);
return res;
}
#ifdef __CUDA_ARCH__
__host__ __device__ __forceinline__
#else
inline
#endif
float4
multiplyAdd(const float4 &add0, const float4 &fact0, const float4 &fact1)
{
float4 res = add0;
res.x += fact0.x * fact1.x;
res.y += fact0.y * fact1.y;
res.z += fact0.z * fact1.z;
res.w += fact0.w * fact1.w;
return res;
}
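// Pass 1: each thread runs the causal filter over its block row into shared memory (Ybar) and the anticausal filter over that result (Zhat), storing only the block tail P(Ybar) and head E(Zhat) for the cross-block fix-up.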
__global__ void
kernComputePYbarAndEzhat(
float4 *const g_pybar, float4 *const g_ezhat,
const int volDepth, const int volHeight, const int volWidth,
const float a)
{
const int tj = threadIdx.x;
const int tk = threadIdx.y;
const int bi = blockIdx.x;
const int bj = blockIdx.y;
const int bk = blockIdx.z;
const int base_i = BLK_WIDTH * bi;
const int base_j = BLK_HEIGHT * bj;
const int base_k = BLK_DEPTH * bk;
if (base_j + tj >= volHeight) return;
// Compute Ybar = F(0, B(X)) in s_data
__shared__ float4 s_data[BLK_DEPTH][BLK_HEIGHT][BLK_WIDTH];
float4 prevVal;
const int base =
base_i
+ (base_j + tj) * volWidth
+ (base_k + tk) * volWidth * volHeight;
if (bi != 0) {
prevVal = make_float4(0.f, 0.f, 0.f, 0.f);
const float4 curVal = tex1Dfetch(t_dataB4, base+0);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][0] = prevVal = newVal;
} else {
s_data[tk][tj][0] = prevVal = tex1Dfetch(t_dataB4, base+0);
}
if (bi < gridDim.x-1) {
#pragma unroll
for (int i = 1; i < BLK_WIDTH; i++) {
const float4 curVal = tex1Dfetch(t_dataB4, base+i);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][i] = prevVal = newVal;
}
} else {
for (int i = 1; i < volWidth%BLK_WIDTH; i++) {
const float4 curVal = tex1Dfetch(t_dataB4, base+i);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][i] = prevVal = newVal;
}
}
// Store PYbar as T(Ybar)
// const int appDepth = volDepth; // Appendix depth
const int appHeight = volHeight; // Appendix height
const int appWidth = gridDim.x; // Appendix width
const int appPos =
bi
+ (base_j + tj) * appWidth
+ (base_k + tk) * appWidth * appHeight;
g_pybar[appPos] = prevVal;
// Compute Zhat = R(Ybar) from s_data
if (bi < appWidth-1) {
prevVal = make_float4(0.f, 0.f, 0.f, 0.f);
#pragma unroll
for (int i = BLK_WIDTH - 1; i >= 0; i--) {
const float4 curVal = s_data[tk][tj][i];
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
prevVal = newVal;
}
} else {
for (int i = (volWidth%BLK_WIDTH) - 2; i >= 0; i--) {
const float4 curVal = s_data[tk][tj][i];
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
prevVal = newVal;
}
}
// Store EZhat as H(Zhat)
g_ezhat[appPos] = prevVal;
}
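// Pass 2: one thread per image row walks its blocks sequentially, completing the incomplete tails P(Ybar) into P(Y) and heads E(Zhat) into E(Z) in place using AbF, AbR and H(ARB)*AFP.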
__global__ void
kernComputePYandEZ(
float4 *const g_pybar, float4 *const g_ezhat,
const int volDepth, const int volHeight, const int volWidth)
{
const int j = threadIdx.x + blockIdx.x * blockDim.x;
const int k = threadIdx.y + blockIdx.y * blockDim.y;
const int appDepth = volDepth; // Appendix depth
const int appHeight = volHeight; // Appendix height
const int appWidth = ceil_div(volWidth, BLK_WIDTH); // Appendix width
if (j < appHeight && k < appDepth) {
const int base = j * appWidth + k * appWidth * appHeight;
float4 prevValY = g_pybar[base+0];
// Compute Pm(Y) = Pm(Ybar) + AbF * Pm-1(Y)
for (int i = 1; i < appWidth; i++) {
const float4 curValYbar = g_pybar[base+i];
const float4 newValY = multiplyAdd(curValYbar, prevValY, c_AbF);
g_pybar[base+i] = prevValY = newValY;
}
// Compute Em(Z) = AbR * Em+1(Z) + (H(ARB) * AFP) * Pm-1(Y) * Em(Zhat)
float4 prevValZ;
float4 curValZhat = g_ezhat[base+appWidth-1];
float4 newValZ = multiplyAdd(curValZhat, c_HARB_AFP, prevValY);
g_ezhat[base+appWidth-1] = prevValZ = newValZ;
for (int i = appWidth-2; i >= 1; i--) {
curValZhat = g_ezhat[base+i];
newValZ = multiplyAdd(curValZhat, c_AbR, prevValZ);
newValZ = multiplyAdd(newValZ, c_HARB_AFP, g_pybar[base+i-1]);
g_ezhat[base+i] = prevValZ = newValZ;
}
curValZhat = g_ezhat[base+0];
newValZ = multiplyAdd(curValZhat, c_AbR, prevValZ);
g_ezhat[base+0] = prevValZ = newValZ;
}
}
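// Pass 3: re-runs the causal and anticausal passes inside each block, now seeded with the corrected neighbouring tail P(Y) and head E(Z), and writes the final filtered values to g_data.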
__global__ void
kernComputeYandZ(
float4 *const g_data,
const float4 *const g_py, const float4 *const g_ez,
const int volDepth, const int volHeight, const int volWidth,
const float a)
{
const int tj = threadIdx.x;
const int tk = threadIdx.y;
const int bi = blockIdx.x;
const int bj = blockIdx.y;
const int bk = blockIdx.z;
const int base_i = BLK_WIDTH * bi;
const int base_j = BLK_HEIGHT * bj;
const int base_k = BLK_DEPTH * bk;
if (base_j + tj >= volHeight) return;
__shared__ float4 s_data[BLK_DEPTH][BLK_HEIGHT][BLK_WIDTH];
// const int appDepth = volDepth; // Appendix depth
const int appHeight = volHeight; // Appendix height
const int appWidth = gridDim.x; // Appendix width
const int base =
base_i
+ (base_j + tj) * volWidth
+ (base_k + tk) * volWidth * volHeight;
const int appPos =
bi
+ (base_j + tj) * appWidth
+ (base_k + tk) * appWidth * appHeight;
// Compute Bm(Y) in s_data from Pm-1(Y)
float4 prevVal;
if (bi > 0) {
prevVal = g_py[appPos-1];
const float4 curVal = tex1Dfetch(t_dataB4, base+0);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][0] = prevVal = newVal;
} else {
const float4 curVal = tex1Dfetch(t_dataB4, base+0);
s_data[tk][tj][0] = prevVal = curVal;
}
if (bi < appWidth-1) {
#pragma unroll
for (int i = 1; i < BLK_WIDTH; i++) {
const float4 curVal = tex1Dfetch(t_dataB4, base+i);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][i] = prevVal = newVal;
}
} else {
for (int i = 1; i < volWidth%BLK_WIDTH; i++) {
const float4 curVal = tex1Dfetch(t_dataB4, base+i);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][i] = prevVal = newVal;
}
}
// Compute Bm(Z) from s_data and Em+1(Z)
if (bi < appWidth-1) {
prevVal = g_ez[appPos+1];
#pragma unroll
for (int i = BLK_WIDTH - 1; i >= 0; i--) {
const float4 curVal = s_data[tk][tj][i];
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
g_data[base+i] = prevVal = newVal;
}
} else {
float4 curVal = s_data[tk][tj][(volWidth%BLK_WIDTH)-1];
g_data[base+(volWidth%BLK_WIDTH)-1] = prevVal = curVal;
for (int i = (volWidth%BLK_WIDTH) - 2; i >= 0; i--) {
curVal = s_data[tk][tj][i];
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
g_data[base+i] = prevVal = newVal;
}
}
}
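// Precomputes the block-boundary terms of the first-order filter on the host (AFP, ARE, ARB and the derived AbF, AbR, H(ARB)*AFP) and uploads them to constant memory.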
void
computeConstants(const float a)
{
// Compute AFP = F(Ir, 0), where Ir is an rxr unit matrix
// and 0 is an rxb zero matrix. In this case, r=1, and b=BLK_WIDTH.
float4 h_AFP[1][BLK_WIDTH];
float4 prevVal = make_float4(1.f, 1.f, 1.f, 1.f);
for (int i = 0; i < BLK_WIDTH; i++) {
const float4 curVal = make_float4(0.f, 0.f, 0.f, 0.f);
h_AFP[0][i] = prevVal = filterHorOperation4(curVal, prevVal, a);
}
// Compute ARE = R(0, Ir)
float4 h_ARE[1][BLK_WIDTH];
prevVal = make_float4(1.f, 1.f, 1.f, 1.f);
for (int i = BLK_WIDTH-1; i >= 0; i--) {
const float4 curVal = make_float4(0.f, 0.f, 0.f, 0.f);
h_ARE[0][i] = prevVal = filterHorOperation4(curVal, prevVal, a);
}
// // Compute AFB = F(0, Ib), where Ib is a bxb unit matrix and 0 is
// // a bxr zero matrix. In this case, r=1, and b=BLK_WIDTH=BLK_HEIGHT.
// float4 h_AFB[BLK_HEIGHT][BLK_WIDTH];
//
// for (int j = 0; j < BLK_HEIGHT; j++) {
// prevVal = make_float4(0.f, 0.f, 0.f, 0.f);
// for (int i = 0; i < BLK_WIDTH; i++) {
// const float4 curVal = (i != j) ? make_float4(0.f, 0.f, 0.f, 0.f)
// : make_float4(1.f, 1.f, 1.f, 1.f);
// h_AFB[j][i] = prevVal = filterHorOperation4(curVal, prevVal, a);
// }
// }
// Compute ARB = R(Ib, 0)
float4 h_ARB[BLK_HEIGHT][BLK_WIDTH];
for (int j = 0; j < BLK_HEIGHT; j++) {
prevVal = make_float4(0.f, 0.f, 0.f, 0.f);
for (int i = BLK_WIDTH - 1; i >= 0; i--) {
const float4 curVal = (i != j) ? make_float4(0.f, 0.f, 0.f, 0.f)
: make_float4(1.f, 1.f, 1.f, 1.f);
h_ARB[j][i] = prevVal = filterHorOperation4(curVal, prevVal, a);
}
}
// Compute AbF = T(AFP) and AbR = H(ARE)
const float4 h_AbF = h_AFP[0][BLK_WIDTH-1];
const float4 h_AbR = h_ARE[0][0];
// Compute H(ARB) * AFP
float4 h_HARB_AFP = make_float4(0.f, 0.f, 0.f, 0.f);
for (int i = 0; i < BLK_HEIGHT; i++) {
h_HARB_AFP = multiplyAdd(h_HARB_AFP, h_AFP[0][i], h_ARB[i][0]);
}
// Load symbols into constant memory
CUDA_CHECK_RETURN(hipMemcpyToSymbol(c_AFP, &h_AFP[0], sizeof(h_AFP),
0, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpyToSymbol(c_AbF, &h_AbF, sizeof(h_AbF),
0, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpyToSymbol(c_AbR, &h_AbR, sizeof(h_AbR),
0, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpyToSymbol(c_HARB_AFP, &h_HARB_AFP,
sizeof(h_HARB_AFP),
0, hipMemcpyHostToDevice));
}
void
computePYbarAndEZhat(
float4 *const d_pybar, float4 *const d_ezhat,
const int depth, const int height, const int width,
const float a)
{
const dim3 blkSize(BLK_HEIGHT, BLK_DEPTH);
const dim3 grdSize(ceil_div(width, BLK_WIDTH),
ceil_div(height, BLK_HEIGHT),
ceil_div(depth, BLK_DEPTH));
hipLaunchKernelGGL(( kernComputePYbarAndEzhat), dim3(grdSize), dim3(blkSize), 0, 0,
d_pybar, d_ezhat, depth, height, width, a);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
CUDA_CHECK_RETURN(hipGetLastError());
}
void
computePYandEZ(
float4 *const d_pybar, float4 *const d_ezhat,
const int depth, const int height, const int width)
{
const dim3 blkSize(BLK_HEIGHT, BLK_DEPTH);
const dim3 grdSize(ceil_div(height, BLK_HEIGHT),
ceil_div(depth, BLK_DEPTH));
hipLaunchKernelGGL(( kernComputePYandEZ), dim3(grdSize), dim3(blkSize), 0, 0,
d_pybar, d_ezhat, depth, height, width);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
CUDA_CHECK_RETURN(hipGetLastError());
}
void
computeYandZ(
float4 *const d_data,
const float4 *const d_py, const float4 *const d_ez,
const int depth, const int height, const int width,
const float a)
{
const dim3 blkSize(BLK_HEIGHT, BLK_DEPTH);
const dim3 grdSize(ceil_div(width, BLK_WIDTH),
ceil_div(height, BLK_HEIGHT),
ceil_div(depth, BLK_DEPTH));
hipLaunchKernelGGL(( kernComputeYandZ), dim3(grdSize), dim3(blkSize), 0, 0,
d_data, d_py, d_ez, depth, height, width, a);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
CUDA_CHECK_RETURN(hipGetLastError());
}
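// Host driver: uploads the volume, binds it to a 1D float4 texture, allocates the P(Ybar)/E(Zhat) appendix buffers and runs the three passes NUM_ITERS times before copying the result back to the host.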
void
computeBlockResultFromTexture(
float4 *const h_resData,
const float4 *const h_origData,
const int depth, const int height, const int width,
const float a)
{
const int dataSize = depth * height * width;
// Copy original data in GPU
float4 *d_origData;
CUDA_CHECK_RETURN(hipMalloc((void**) &d_origData,
sizeof(*d_origData) * dataSize));
CUDA_CHECK_RETURN(hipMemcpy(d_origData, h_origData,
sizeof(*d_origData) * dataSize,
hipMemcpyHostToDevice));
// Generate buffer to store result in GPU
float4 *d_resData;
CUDA_CHECK_RETURN(hipMalloc((void**) &d_resData,
sizeof(*d_resData) * dataSize));
// Generate buffer to store P(Ybar) and E(Zhat)
const int numBlocksDepth = ceil_div(depth, BLK_DEPTH);
const int numBlocksHeight = ceil_div(height, BLK_HEIGHT);
const int numBlocksWidth = ceil_div(width, BLK_WIDTH);
const int numBlocksTotal = numBlocksDepth * numBlocksHeight * numBlocksWidth;
float4 *d_pybar;
CUDA_CHECK_RETURN(hipMalloc((void**) &d_pybar,
numBlocksTotal * BLK_HEIGHT * BLK_DEPTH * sizeof(*d_pybar)));
float4 *d_ezhat;
CUDA_CHECK_RETURN(hipMalloc((void**) &d_ezhat,
numBlocksTotal * BLK_HEIGHT * BLK_DEPTH * sizeof(*d_pybar)));
CUDA_CHECK_RETURN(hipBindTexture(0, t_dataB4, d_origData,
sizeof(*d_origData) * dataSize));
for (int i = 0; i < NUM_ITERS; i++) {
computePYbarAndEZhat(d_pybar, d_ezhat,
depth, height, width, a);
computePYandEZ(d_pybar, d_ezhat, depth, height, width);
computeYandZ(d_resData, d_pybar, d_ezhat,
depth, height, width, a);
}
// Unbind texture
CUDA_CHECK_RETURN(hipUnbindTexture(t_dataB4));
// Copy result into CPU memory
CUDA_CHECK_RETURN(hipMemcpy(h_resData, d_resData,
sizeof(*d_origData) * dataSize,
hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipFree(d_origData));
CUDA_CHECK_RETURN(hipFree(d_resData));
}
| 0db2b3a7ac30a9c095b7f2bf4a4e84a80aee6781.cu | #include "BlockFilter.h"
#include <cstdio>
#include "constants.h"
#include "CUDAUtils.h"
#define BLK_DEPTH 1
#define BLK_HEIGHT 32
#define BLK_WIDTH BLK_HEIGHT
texture<float, cudaTextureType1D> t_dataB;
texture<float4, cudaTextureType1D> t_dataB4;
__constant__ float4 c_AFP[BLK_WIDTH];
__constant__ float4 c_AbF;
__constant__ float4 c_AbR;
__constant__ float4 c_HARB_AFP;
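// Block-boundary filter terms in constant memory: AFP (forward response to the prologue), the propagation factors AbF/AbR for the causal/anticausal passes, and H(ARB)*AFP; the naming appears to follow the block-parallel recursive filtering scheme of Nehab et al.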
#ifdef __CUDA_ARCH__
__host__ __device__ __forceinline__
#else
inline
#endif
float4
filterHorOperation4(
const float4 &val0, const float4 &val1, const float a)
{
float4 res = val0;
res.x += a * (val1.x - val0.x);
res.y += a * (val1.y - val0.y);
res.z += a * (val1.z - val0.z);
res.w += a * (val1.w - val0.w);
return res;
}
#ifdef __CUDA_ARCH__
__host__ __device__ __forceinline__
#else
inline
#endif
float4
multiplyAdd(const float4 &add0, const float4 &fact0, const float4 &fact1)
{
float4 res = add0;
res.x += fact0.x * fact1.x;
res.y += fact0.y * fact1.y;
res.z += fact0.z * fact1.z;
res.w += fact0.w * fact1.w;
return res;
}
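// Pass 1: each thread runs the causal filter over its block row into shared memory (Ybar) and the anticausal filter over that result (Zhat), storing only the block tail P(Ybar) and head E(Zhat) for the cross-block fix-up.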
__global__ void
kernComputePYbarAndEzhat(
float4 *const g_pybar, float4 *const g_ezhat,
const int volDepth, const int volHeight, const int volWidth,
const float a)
{
const int tj = threadIdx.x;
const int tk = threadIdx.y;
const int bi = blockIdx.x;
const int bj = blockIdx.y;
const int bk = blockIdx.z;
const int base_i = BLK_WIDTH * bi;
const int base_j = BLK_HEIGHT * bj;
const int base_k = BLK_DEPTH * bk;
if (base_j + tj >= volHeight) return;
// Compute Ybar = F(0, B(X)) in s_data
__shared__ float4 s_data[BLK_DEPTH][BLK_HEIGHT][BLK_WIDTH];
float4 prevVal;
const int base =
base_i
+ (base_j + tj) * volWidth
+ (base_k + tk) * volWidth * volHeight;
if (bi != 0) {
prevVal = make_float4(0.f, 0.f, 0.f, 0.f);
const float4 curVal = tex1Dfetch(t_dataB4, base+0);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][0] = prevVal = newVal;
} else {
s_data[tk][tj][0] = prevVal = tex1Dfetch(t_dataB4, base+0);
}
if (bi < gridDim.x-1) {
#pragma unroll
for (int i = 1; i < BLK_WIDTH; i++) {
const float4 curVal = tex1Dfetch(t_dataB4, base+i);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][i] = prevVal = newVal;
}
} else {
for (int i = 1; i < volWidth%BLK_WIDTH; i++) {
const float4 curVal = tex1Dfetch(t_dataB4, base+i);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][i] = prevVal = newVal;
}
}
// Store PYbar as T(Ybar)
// const int appDepth = volDepth; // Appendix depth
const int appHeight = volHeight; // Appendix height
const int appWidth = gridDim.x; // Appendix width
const int appPos =
bi
+ (base_j + tj) * appWidth
+ (base_k + tk) * appWidth * appHeight;
g_pybar[appPos] = prevVal;
// Compute Zhat = R(Ybar) from s_data
if (bi < appWidth-1) {
prevVal = make_float4(0.f, 0.f, 0.f, 0.f);
#pragma unroll
for (int i = BLK_WIDTH - 1; i >= 0; i--) {
const float4 curVal = s_data[tk][tj][i];
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
prevVal = newVal;
}
} else {
for (int i = (volWidth%BLK_WIDTH) - 2; i >= 0; i--) {
const float4 curVal = s_data[tk][tj][i];
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
prevVal = newVal;
}
}
// Store EZhat as H(Zhat)
g_ezhat[appPos] = prevVal;
}
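// Pass 2: one thread per image row walks its blocks sequentially, completing the incomplete tails P(Ybar) into P(Y) and heads E(Zhat) into E(Z) in place using AbF, AbR and H(ARB)*AFP.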
__global__ void
kernComputePYandEZ(
float4 *const g_pybar, float4 *const g_ezhat,
const int volDepth, const int volHeight, const int volWidth)
{
const int j = threadIdx.x + blockIdx.x * blockDim.x;
const int k = threadIdx.y + blockIdx.y * blockDim.y;
const int appDepth = volDepth; // Appendix depth
const int appHeight = volHeight; // Appendix height
const int appWidth = ceil_div(volWidth, BLK_WIDTH); // Appendix width
if (j < appHeight && k < appDepth) {
const int base = j * appWidth + k * appWidth * appHeight;
float4 prevValY = g_pybar[base+0];
// Compute Pm(Y) = Pm(Ybar) + AbF * Pm-1(Y)
for (int i = 1; i < appWidth; i++) {
const float4 curValYbar = g_pybar[base+i];
const float4 newValY = multiplyAdd(curValYbar, prevValY, c_AbF);
g_pybar[base+i] = prevValY = newValY;
}
// Compute Em(Z) = AbR * Em+1(Z) + (H(ARB) * AFP) * Pm-1(Y) * Em(Zhat)
float4 prevValZ;
float4 curValZhat = g_ezhat[base+appWidth-1];
float4 newValZ = multiplyAdd(curValZhat, c_HARB_AFP, prevValY);
g_ezhat[base+appWidth-1] = prevValZ = newValZ;
for (int i = appWidth-2; i >= 1; i--) {
curValZhat = g_ezhat[base+i];
newValZ = multiplyAdd(curValZhat, c_AbR, prevValZ);
newValZ = multiplyAdd(newValZ, c_HARB_AFP, g_pybar[base+i-1]);
g_ezhat[base+i] = prevValZ = newValZ;
}
curValZhat = g_ezhat[base+0];
newValZ = multiplyAdd(curValZhat, c_AbR, prevValZ);
g_ezhat[base+0] = prevValZ = newValZ;
}
}
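// Pass 3: re-runs the causal and anticausal passes inside each block, now seeded with the corrected neighbouring tail P(Y) and head E(Z), and writes the final filtered values to g_data.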
__global__ void
kernComputeYandZ(
float4 *const g_data,
const float4 *const g_py, const float4 *const g_ez,
const int volDepth, const int volHeight, const int volWidth,
const float a)
{
const int tj = threadIdx.x;
const int tk = threadIdx.y;
const int bi = blockIdx.x;
const int bj = blockIdx.y;
const int bk = blockIdx.z;
const int base_i = BLK_WIDTH * bi;
const int base_j = BLK_HEIGHT * bj;
const int base_k = BLK_DEPTH * bk;
if (base_j + tj >= volHeight) return;
__shared__ float4 s_data[BLK_DEPTH][BLK_HEIGHT][BLK_WIDTH];
// const int appDepth = volDepth; // Appendix depth
const int appHeight = volHeight; // Appendix height
const int appWidth = gridDim.x; // Appendix width
const int base =
base_i
+ (base_j + tj) * volWidth
+ (base_k + tk) * volWidth * volHeight;
const int appPos =
bi
+ (base_j + tj) * appWidth
+ (base_k + tk) * appWidth * appHeight;
// Compute Bm(Y) in s_data from Pm-1(Y)
float4 prevVal;
if (bi > 0) {
prevVal = g_py[appPos-1];
const float4 curVal = tex1Dfetch(t_dataB4, base+0);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][0] = prevVal = newVal;
} else {
const float4 curVal = tex1Dfetch(t_dataB4, base+0);
s_data[tk][tj][0] = prevVal = curVal;
}
if (bi < appWidth-1) {
#pragma unroll
for (int i = 1; i < BLK_WIDTH; i++) {
const float4 curVal = tex1Dfetch(t_dataB4, base+i);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][i] = prevVal = newVal;
}
} else {
for (int i = 1; i < volWidth%BLK_WIDTH; i++) {
const float4 curVal = tex1Dfetch(t_dataB4, base+i);
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
s_data[tk][tj][i] = prevVal = newVal;
}
}
// Compute Bm(Z) from s_data and Em+1(Z)
if (bi < appWidth-1) {
prevVal = g_ez[appPos+1];
#pragma unroll
for (int i = BLK_WIDTH - 1; i >= 0; i--) {
const float4 curVal = s_data[tk][tj][i];
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
g_data[base+i] = prevVal = newVal;
}
} else {
float4 curVal = s_data[tk][tj][(volWidth%BLK_WIDTH)-1];
g_data[base+(volWidth%BLK_WIDTH)-1] = prevVal = curVal;
for (int i = (volWidth%BLK_WIDTH) - 2; i >= 0; i--) {
curVal = s_data[tk][tj][i];
const float4 newVal = filterHorOperation4(curVal, prevVal, a);
g_data[base+i] = prevVal = newVal;
}
}
}
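// Precomputes the block-boundary terms of the first-order filter on the host (AFP, ARE, ARB and the derived AbF, AbR, H(ARB)*AFP) and uploads them to constant memory.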
void
computeConstants(const float a)
{
// Compute AFP = F(Ir, 0), where Ir is an rxr unit matrix
// and 0 is an rxb zero matrix. In this case, r=1, and b=BLK_WIDTH.
float4 h_AFP[1][BLK_WIDTH];
float4 prevVal = make_float4(1.f, 1.f, 1.f, 1.f);
for (int i = 0; i < BLK_WIDTH; i++) {
const float4 curVal = make_float4(0.f, 0.f, 0.f, 0.f);
h_AFP[0][i] = prevVal = filterHorOperation4(curVal, prevVal, a);
}
// Compute ARE = R(0, Ir)
float4 h_ARE[1][BLK_WIDTH];
prevVal = make_float4(1.f, 1.f, 1.f, 1.f);
for (int i = BLK_WIDTH-1; i >= 0; i--) {
const float4 curVal = make_float4(0.f, 0.f, 0.f, 0.f);
h_ARE[0][i] = prevVal = filterHorOperation4(curVal, prevVal, a);
}
// // Compute AFB = F(0, Ib), where Ib is a bxb unit matrix and 0 is
// // a bxr zero matrix. In this case, r=1, and b=BLK_WIDTH=BLK_HEIGHT.
// float4 h_AFB[BLK_HEIGHT][BLK_WIDTH];
//
// for (int j = 0; j < BLK_HEIGHT; j++) {
// prevVal = make_float4(0.f, 0.f, 0.f, 0.f);
// for (int i = 0; i < BLK_WIDTH; i++) {
// const float4 curVal = (i != j) ? make_float4(0.f, 0.f, 0.f, 0.f)
// : make_float4(1.f, 1.f, 1.f, 1.f);
// h_AFB[j][i] = prevVal = filterHorOperation4(curVal, prevVal, a);
// }
// }
// Compute ARB = R(Ib, 0)
float4 h_ARB[BLK_HEIGHT][BLK_WIDTH];
for (int j = 0; j < BLK_HEIGHT; j++) {
prevVal = make_float4(0.f, 0.f, 0.f, 0.f);
for (int i = BLK_WIDTH - 1; i >= 0; i--) {
const float4 curVal = (i != j) ? make_float4(0.f, 0.f, 0.f, 0.f)
: make_float4(1.f, 1.f, 1.f, 1.f);
h_ARB[j][i] = prevVal = filterHorOperation4(curVal, prevVal, a);
}
}
// Compute AbF = T(AFP) and AbR = H(ARE)
const float4 h_AbF = h_AFP[0][BLK_WIDTH-1];
const float4 h_AbR = h_ARE[0][0];
// Compute H(ARB) * AFP
float4 h_HARB_AFP = make_float4(0.f, 0.f, 0.f, 0.f);
for (int i = 0; i < BLK_HEIGHT; i++) {
h_HARB_AFP = multiplyAdd(h_HARB_AFP, h_AFP[0][i], h_ARB[i][0]);
}
// Load symbols into constant memory
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(c_AFP, &h_AFP[0], sizeof(h_AFP),
0, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(c_AbF, &h_AbF, sizeof(h_AbF),
0, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(c_AbR, &h_AbR, sizeof(h_AbR),
0, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(c_HARB_AFP, &h_HARB_AFP,
sizeof(h_HARB_AFP),
0, cudaMemcpyHostToDevice));
}
void
computePYbarAndEZhat(
float4 *const d_pybar, float4 *const d_ezhat,
const int depth, const int height, const int width,
const float a)
{
const dim3 blkSize(BLK_HEIGHT, BLK_DEPTH);
const dim3 grdSize(ceil_div(width, BLK_WIDTH),
ceil_div(height, BLK_HEIGHT),
ceil_div(depth, BLK_DEPTH));
kernComputePYbarAndEzhat<<<grdSize, blkSize>>>
(d_pybar, d_ezhat, depth, height, width, a);
CUDA_CHECK_RETURN(cudaThreadSynchronize());
CUDA_CHECK_RETURN(cudaGetLastError());
}
void
computePYandEZ(
float4 *const d_pybar, float4 *const d_ezhat,
const int depth, const int height, const int width)
{
const dim3 blkSize(BLK_HEIGHT, BLK_DEPTH);
const dim3 grdSize(ceil_div(height, BLK_HEIGHT),
ceil_div(depth, BLK_DEPTH));
kernComputePYandEZ<<<grdSize, blkSize>>>
(d_pybar, d_ezhat, depth, height, width);
CUDA_CHECK_RETURN(cudaThreadSynchronize());
CUDA_CHECK_RETURN(cudaGetLastError());
}
void
computeYandZ(
float4 *const d_data,
const float4 *const d_py, const float4 *const d_ez,
const int depth, const int height, const int width,
const float a)
{
const dim3 blkSize(BLK_HEIGHT, BLK_DEPTH);
const dim3 grdSize(ceil_div(width, BLK_WIDTH),
ceil_div(height, BLK_HEIGHT),
ceil_div(depth, BLK_DEPTH));
kernComputeYandZ<<<grdSize, blkSize>>>
(d_data, d_py, d_ez, depth, height, width, a);
CUDA_CHECK_RETURN(cudaThreadSynchronize());
CUDA_CHECK_RETURN(cudaGetLastError());
}
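// Host driver: uploads the volume, binds it to a 1D float4 texture, allocates the P(Ybar)/E(Zhat) appendix buffers and runs the three passes NUM_ITERS times before copying the result back to the host.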
void
computeBlockResultFromTexture(
float4 *const h_resData,
const float4 *const h_origData,
const int depth, const int height, const int width,
const float a)
{
const int dataSize = depth * height * width;
// Copy original data in GPU
float4 *d_origData;
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_origData,
sizeof(*d_origData) * dataSize));
CUDA_CHECK_RETURN(cudaMemcpy(d_origData, h_origData,
sizeof(*d_origData) * dataSize,
cudaMemcpyHostToDevice));
// Generate buffer to store result in GPU
float4 *d_resData;
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_resData,
sizeof(*d_resData) * dataSize));
// Generate buffer to store P(Ybar) and E(Zhat)
const int numBlocksDepth = ceil_div(depth, BLK_DEPTH);
const int numBlocksHeight = ceil_div(height, BLK_HEIGHT);
const int numBlocksWidth = ceil_div(width, BLK_WIDTH);
const int numBlocksTotal = numBlocksDepth * numBlocksHeight * numBlocksWidth;
float4 *d_pybar;
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_pybar,
numBlocksTotal * BLK_HEIGHT * BLK_DEPTH * sizeof(*d_pybar)));
float4 *d_ezhat;
CUDA_CHECK_RETURN(cudaMalloc((void**) &d_ezhat,
numBlocksTotal * BLK_HEIGHT * BLK_DEPTH * sizeof(*d_pybar)));
CUDA_CHECK_RETURN(cudaBindTexture(0, t_dataB4, d_origData,
sizeof(*d_origData) * dataSize));
for (int i = 0; i < NUM_ITERS; i++) {
computePYbarAndEZhat(d_pybar, d_ezhat,
depth, height, width, a);
computePYandEZ(d_pybar, d_ezhat, depth, height, width);
computeYandZ(d_resData, d_pybar, d_ezhat,
depth, height, width, a);
}
// Unbind texture
CUDA_CHECK_RETURN(cudaUnbindTexture(t_dataB4));
// Copy result into CPU memory
CUDA_CHECK_RETURN(cudaMemcpy(h_resData, d_resData,
sizeof(*d_origData) * dataSize,
cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaFree(d_origData));
CUDA_CHECK_RETURN(cudaFree(d_resData));
}
|