hip_filename (string, lengths 5 to 84) | hip_content (string, lengths 79 to 9.69M) | cuda_filename (string, lengths 4 to 83) | cuda_content (string, lengths 19 to 9.69M) |
---|---|---|---|
409877774841ac4126cdd55835e2f4ee228cfc77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <unistd.h>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#define INF 9999999
#define OFFSET 0
using namespace std;
namespace caffe {
template <typename Dtype>
__global__ void CountLabel(const int nthreads,
const Dtype* correlation, const int target,
const int height, const int width, int* label_set, int* iSegments) {
CUDA_KERNEL_LOOP(index, nthreads) {
*iSegments = 0;
for(int h = 0;h < height;++h)
{
for(int w = 0;w < width;++w)
{
int label = correlation[target * height * width + h * width + w];
int k;
for(k = 0;k < *iSegments;++k)
{
if(label == label_set[k])
{
break;
}
}
if(k == *iSegments)
{
label_set[k] = label;
*iSegments = *iSegments + 1;
}
}
}
for(int j = 0; j < *iSegments-1; j++)
{
for(int i = 0; i < *iSegments-1-j; i++)
{
if(label_set[i] > label_set[i + 1])
{
int temp = label_set[i];
label_set[i] = label_set[i + 1];
label_set[i + 1] = temp;
}
}
}
}
}
template <typename Dtype>
__global__ void Forward(const int nthreads,
const Dtype* bottom_data, const Dtype* correlation,
const int target, const Dtype* label_set, const int iSegments, int maxSegments,
const int channels, const int height, const int width,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
int label = correlation[target * height * width + h * width + w];
int k;
for(k = 0;k < iSegments;++k)
{
if(label == label_set[k])
{
break;
}
}
if (k < iSegments)
{
top_data[index] = bottom_data[c*maxSegments + k];
}
}
}
template <typename Dtype>
void R2PLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* corr_data = bottom[1]->gpu_data();
const Dtype* label_set = bottom[3]->gpu_data();
const int target = static_cast<int>(*(bottom[2]->cpu_data()));
const int isegment = static_cast<int>(*(bottom[4]->cpu_data()));
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0.), top_data);
int count;
// pooling
count = top[0]->count();
hipLaunchKernelGGL(( Forward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, corr_data, target, label_set, isegment, maxSegments_, channels_, height_, width_, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void Backward(const int nthreads,
const Dtype* top_diff, const Dtype* correlation,
const int target, const Dtype* label_set, const int iSegments, const int maxSegments,
const int channels, const int height, const int width,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int s = index % maxSegments;
if (s >= iSegments)
{
return;
}
const int c = (index / maxSegments) % channels;
int label = label_set[s];
int iNum = 0;
for(int h = 0;h < height;++h)
{
for(int w = 0;w < width;++w)
{
if (label == correlation[target * height * width + h * width + w])
{
bottom_diff[index] += top_diff[c*height*width + h*width + w];
iNum++;
}
}
}
bottom_diff[index] = bottom_diff[index]/iNum;
}
}
template <typename Dtype>
void R2PLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_set(bottom[0]->count(), Dtype(0.), bottom_diff);
const Dtype* corr_data = bottom[1]->gpu_data();
const Dtype* label_set = bottom[3]->gpu_data();
const int target = static_cast<int>(*(bottom[2]->cpu_data()));
const int isegment = static_cast<int>(*(bottom[4]->cpu_data()));
int count = channels_*maxSegments_;
hipLaunchKernelGGL(( Backward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, corr_data, target, label_set, isegment, maxSegments_, channels_, height_, width_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(R2PLayer);
} // namespace caffe
| 409877774841ac4126cdd55835e2f4ee228cfc77.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <unistd.h>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#define INF 9999999
#define OFFSET 0
using namespace std;
namespace caffe {
template <typename Dtype>
__global__ void CountLabel(const int nthreads,
const Dtype* correlation, const int target,
const int height, const int width, int* label_set, int* iSegments) {
CUDA_KERNEL_LOOP(index, nthreads) {
*iSegments = 0;
for(int h = 0;h < height;++h)
{
for(int w = 0;w < width;++w)
{
int label = correlation[target * height * width + h * width + w];
int k;
for(k = 0;k < *iSegments;++k)
{
if(label == label_set[k])
{
break;
}
}
if(k == *iSegments)
{
label_set[k] = label;
*iSegments = *iSegments + 1;
}
}
}
for(int j = 0; j < *iSegments-1; j++)
{
for(int i = 0; i < *iSegments-1-j; i++)
{
if(label_set[i] > label_set[i + 1])
{
int temp = label_set[i];
label_set[i] = label_set[i + 1];
label_set[i + 1] = temp;
}
}
}
}
}
template <typename Dtype>
__global__ void Forward(const int nthreads,
const Dtype* bottom_data, const Dtype* correlation,
const int target, const Dtype* label_set, const int iSegments, int maxSegments,
const int channels, const int height, const int width,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
int label = correlation[target * height * width + h * width + w];
int k;
for(k = 0;k < iSegments;++k)
{
if(label == label_set[k])
{
break;
}
}
if (k < iSegments)
{
top_data[index] = bottom_data[c*maxSegments + k];
}
}
}
template <typename Dtype>
void R2PLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* corr_data = bottom[1]->gpu_data();
const Dtype* label_set = bottom[3]->gpu_data();
const int target = static_cast<int>(*(bottom[2]->cpu_data()));
const int isegment = static_cast<int>(*(bottom[4]->cpu_data()));
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0.), top_data);
int count;
// pooling
count = top[0]->count();
Forward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, bottom_data, corr_data, target, label_set, isegment, maxSegments_, channels_, height_, width_, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void Backward(const int nthreads,
const Dtype* top_diff, const Dtype* correlation,
const int target, const Dtype* label_set, const int iSegments, const int maxSegments,
const int channels, const int height, const int width,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int s = index % maxSegments;
if (s >= iSegments)
{
return;
}
const int c = (index / maxSegments) % channels;
int label = label_set[s];
int iNum = 0;
for(int h = 0;h < height;++h)
{
for(int w = 0;w < width;++w)
{
if (label == correlation[target * height * width + h * width + w])
{
bottom_diff[index] += top_diff[c*height*width + h*width + w];
iNum++;
}
}
}
bottom_diff[index] = bottom_diff[index]/iNum;
}
}
template <typename Dtype>
void R2PLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_set(bottom[0]->count(), Dtype(0.), bottom_diff);
const Dtype* corr_data = bottom[1]->gpu_data();
const Dtype* label_set = bottom[3]->gpu_data();
const int target = static_cast<int>(*(bottom[2]->cpu_data()));
const int isegment = static_cast<int>(*(bottom[4]->cpu_data()));
int count = channels_*maxSegments_;
Backward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, top_diff, corr_data, target, label_set, isegment, maxSegments_, channels_, height_, width_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(R2PLayer);
} // namespace caffe
|
8ec5564620e4cc7bcf6472e4685c8681839942b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dpCudaOxxxxx.hpp"
#include "errorCheck.hpp"
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
#define BEGIN hipEventRecord(begin, 0);
#define END hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&delTime, begin, end);
__device__ void oxxxxx(double* p, double fmass, int nhel, int nsf, cmplx* fo){
fo[4] = cmplx(p[0]*nsf, p[3]*nsf);
fo[5] = cmplx(p[1]*nsf, p[2]*nsf);
int nh = nhel*nsf;
cmplx chi[2];
if (fmass!=0.) {
double pp = fmin(p[0],sqrt(p[1]*p[1] + p[2]*p[2] + p[3]*p[3] ) );
if (pp==0.) {
double sqm[2];
sqm[0] = sqrt(fabs(fmass) );
sqm[1] = copysign(sqm[0], fmass);
int ip = -(1+nh)/2;
int im = (1-nh)/2;
fo[0] = cmplx((double)(im) *sqm[im]);
fo[1] = cmplx((double)(ip*nsf)*sqm[im]);
fo[2] = cmplx((double)(im*nsf)*sqm[-ip]);
fo[3] = cmplx((double)(ip) *sqm[-ip]);
}
else {
double sf[2],omega[2];
sf[0] = (double)(1 + nsf + (1-nsf)*nh)*0.5;
sf[1] = (double)(1 + nsf - (1-nsf)*nh)*0.5;
omega[0] = sqrt(p[0]+pp);
omega[1] = fmass*(1./omega[0]);
double pp3 = fmax(pp+p[3],0.);
chi[0] = cmplx(sqrt(pp3*0.5*(1./pp)));
if (pp3==0.) {
chi[1] = cmplx((double)(nh));
}
else {
chi[1] = cmplx((double)(nh)*p[1],-p[2])/sqrt(2.*pp*pp3) ;
}
int ip = (3+nh)/2-1;
int im = (3-nh)/2-1;
fo[0] = sf[1]*omega[im]*chi[im];
fo[1] = sf[1]*omega[im]*chi[ip];
fo[2] = sf[0]*omega[ip]*chi[im];
fo[3] = sf[0]*omega[ip]*chi[ip];
}
}
else {
double sqp0p3;
if (p[1]==0. && p[2]==0. && p[3]<0.) {
sqp0p3 = 0.;
}
else {
sqp0p3 = sqrt(fmax(p[0]+p[3],0.))*(double)(nsf);
}
chi[0] = cmplx(sqp0p3);
if (sqp0p3==0.) {
chi[1] = cmplx( (double)(nhel)*sqrt(2.*p[0]) );
}
else {
chi[1] = (1./sqp0p3) * cmplx( (double)(nh)*p[1],-p[2] );
}
cmplx czero = cmplx(0.,0.);
if (nh==1) {
fo[0] = chi[0];
fo[1] = chi[1];
fo[2] = czero;
fo[3] = czero;
}
else {
fo[0] = czero;
fo[1] = czero;
fo[2] = chi[1];
fo[3] = chi[0];
}
}
}
// Kernel that executes on the CUDA device
__global__ void Oxxxxx(double *P_d, cmplx *Fo_d, int Psize){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
double fmass = 124.412;
int nhel = 2;
int nsf = 4;
double P[4];
cmplx Fo[6];
P[0] = P_d[idx*4 + 0];
P[1] = P_d[idx*4 + 1];
P[2] = P_d[idx*4 + 2];
P[3] = P_d[idx*4 + 3];
if (idx*4 < Psize){
oxxxxx(P, fmass, nhel, nsf, Fo);
}
else
return;
//synch?
Fo_d[6*idx + 0] = Fo[0];
Fo_d[6*idx + 1] = Fo[1];
Fo_d[6*idx + 2] = Fo[2];
Fo_d[6*idx + 3] = Fo[3];
Fo_d[6*idx + 4] = Fo[4];
Fo_d[6*idx + 5] = Fo[5];
}
//notice unused parameters for CUDA kernel:
dpCudaOxxxxx::dpCudaOxxxxx(cl_context ctx, cl_command_queue q){
workDimension = ONE_D;
//name is same as cl alternative allowing the analysis script to later figure
//out this measurement was from a cuda kernel by inspecting the platform id from dpClient
name = "Oxxxxx";
hipEventCreate(&begin);
hipEventCreate(&end);
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
cudaErrChk(hipPeekAtLastError());
}
void dpCudaOxxxxx::setup(int dataMB, int xLocal, int yLocal, int zLocal){
localSize[0] = localSize[1] = localSize[2] = 1;
Psize = 1048576*dataMB/(sizeof(double)*4);
MB = Psize * (sizeof(double)*4) / 1048576;
}
void dpCudaOxxxxx::init(){
//allocate local memory for original array
P = new double[4*Psize];
Fo = new cmplx[6*Psize];
if(!P || !Fo)
fprintf(stderr, "error in malloc\n");
generateArray(P, Psize);
dataParameters.push_back(Psize);
dataNames.push_back("nElements");
}
void dpCudaOxxxxx::memoryCopyOut(){
BEGIN
cudaErrChk( hipMalloc((void **) &P_d, Psize*sizeof(double)*4 ));
cudaErrChk( hipMalloc((void **) &Fo_d, Psize*sizeof(cmplx)*6 ));
cudaErrChk( hipMemcpy(P_d, P, Psize*sizeof(double)*4, hipMemcpyHostToDevice) );
END
}
void dpCudaOxxxxx::plan(){
BEGIN
blockSize = props.maxThreadsPerBlock;
lastBlock = 0;
nBlocks = Psize/blockSize; //nblocks = ceil(Psize/blockSize)
if (Psize%blockSize != 0)
nBlocks++;
if (nBlocks > 65535)
nBlocks = 65535;
nKernels = nBlocks / 65535;
if (nKernels == 0){
lastBlock = nBlocks; //run normally
}
else
lastBlock = nBlocks % 65535; //run repeated
END
}
int dpCudaOxxxxx::execute(){
hipError_t err;
int stride = blockSize*nBlocks;
int lastStride = blockSize * lastBlock;
BEGIN
for (int i = 0; i < nKernels; i++){
hipLaunchKernelGGL(( Oxxxxx) , dim3(nBlocks), dim3(blockSize) , 0, 0, P_d + (i*stride), Fo_d + (i*stride), Psize - (i*stride));
}
if (lastBlock != 0){
hipLaunchKernelGGL(( Oxxxxx) , dim3(lastBlock), dim3(blockSize) , 0, 0, P_d + (nKernels*lastStride), Fo_d + (nKernels*lastStride), Psize - (nKernels*lastStride));
}
err = hipPeekAtLastError();
cudaErrChk(err);
cudaErrChk(hipDeviceSynchronize());
END
if(err!=hipSuccess)
return -1;
return 0;
}
void dpCudaOxxxxx::memoryCopyIn(){
BEGIN
cudaErrChk(hipMemcpy(Fo, Fo_d, Psize*sizeof(cmplx)*6, hipMemcpyDeviceToHost));
END
}
void dpCudaOxxxxx::cleanUp(){
hipFree(P_d);
hipFree(Fo_d);
delete[] P;
delete[] Fo;
}
void dpCudaOxxxxx::generateArray(double *P, int N){
int i;
srand(time(NULL));
for (i=0; i < N - 4; i=i+4){
P[i+0]=rand() / (RAND_MAX/99999.9 + 1);
P[i+1]=rand() / (RAND_MAX/99999.9 + 1);
P[i+2]=rand() / (RAND_MAX/99999.9 + 1);
P[i+3]=rand() / (RAND_MAX/99999.9 + 1);
}
}
| 8ec5564620e4cc7bcf6472e4685c8681839942b7.cu | #include "dpCudaOxxxxx.hpp"
#include "errorCheck.hpp"
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
#define BEGIN cudaEventRecord(begin, 0);
#define END cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&delTime, begin, end);
__device__ void oxxxxx(double* p, double fmass, int nhel, int nsf, cmplx* fo){
fo[4] = cmplx(p[0]*nsf, p[3]*nsf);
fo[5] = cmplx(p[1]*nsf, p[2]*nsf);
int nh = nhel*nsf;
cmplx chi[2];
if (fmass!=0.) {
double pp = fmin(p[0],sqrt(p[1]*p[1] + p[2]*p[2] + p[3]*p[3] ) );
if (pp==0.) {
double sqm[2];
sqm[0] = sqrt(fabs(fmass) );
sqm[1] = copysign(sqm[0], fmass);
int ip = -(1+nh)/2;
int im = (1-nh)/2;
fo[0] = cmplx((double)(im) *sqm[im]);
fo[1] = cmplx((double)(ip*nsf)*sqm[im]);
fo[2] = cmplx((double)(im*nsf)*sqm[-ip]);
fo[3] = cmplx((double)(ip) *sqm[-ip]);
}
else {
double sf[2],omega[2];
sf[0] = (double)(1 + nsf + (1-nsf)*nh)*0.5;
sf[1] = (double)(1 + nsf - (1-nsf)*nh)*0.5;
omega[0] = sqrt(p[0]+pp);
omega[1] = fmass*(1./omega[0]);
double pp3 = fmax(pp+p[3],0.);
chi[0] = cmplx(sqrt(pp3*0.5*(1./pp)));
if (pp3==0.) {
chi[1] = cmplx((double)(nh));
}
else {
chi[1] = cmplx((double)(nh)*p[1],-p[2])/sqrt(2.*pp*pp3) ;
}
int ip = (3+nh)/2-1;
int im = (3-nh)/2-1;
fo[0] = sf[1]*omega[im]*chi[im];
fo[1] = sf[1]*omega[im]*chi[ip];
fo[2] = sf[0]*omega[ip]*chi[im];
fo[3] = sf[0]*omega[ip]*chi[ip];
}
}
else {
double sqp0p3;
if (p[1]==0. && p[2]==0. && p[3]<0.) {
sqp0p3 = 0.;
}
else {
sqp0p3 = sqrt(fmax(p[0]+p[3],0.))*(double)(nsf);
}
chi[0] = cmplx(sqp0p3);
if (sqp0p3==0.) {
chi[1] = cmplx( (double)(nhel)*sqrt(2.*p[0]) );
}
else {
chi[1] = (1./sqp0p3) * cmplx( (double)(nh)*p[1],-p[2] );
}
cmplx czero = cmplx(0.,0.);
if (nh==1) {
fo[0] = chi[0];
fo[1] = chi[1];
fo[2] = czero;
fo[3] = czero;
}
else {
fo[0] = czero;
fo[1] = czero;
fo[2] = chi[1];
fo[3] = chi[0];
}
}
}
// Kernel that executes on the CUDA device
__global__ void Oxxxxx(double *P_d, cmplx *Fo_d, int Psize){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
double fmass = 124.412;
int nhel = 2;
int nsf = 4;
double P[4];
cmplx Fo[6];
P[0] = P_d[idx*4 + 0];
P[1] = P_d[idx*4 + 1];
P[2] = P_d[idx*4 + 2];
P[3] = P_d[idx*4 + 3];
if (idx*4 < Psize){
oxxxxx(P, fmass, nhel, nsf, Fo);
}
else
return;
//synch?
Fo_d[6*idx + 0] = Fo[0];
Fo_d[6*idx + 1] = Fo[1];
Fo_d[6*idx + 2] = Fo[2];
Fo_d[6*idx + 3] = Fo[3];
Fo_d[6*idx + 4] = Fo[4];
Fo_d[6*idx + 5] = Fo[5];
}
//notice unused parameters for CUDA kernel:
dpCudaOxxxxx::dpCudaOxxxxx(cl_context ctx, cl_command_queue q){
workDimension = ONE_D;
//name is same as cl alternative allowing the analysis script to later figure
//out this measurement was from a cuda kernel by inspecting the platform id from dpClient
name = "Oxxxxx";
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
cudaErrChk(cudaPeekAtLastError());
}
void dpCudaOxxxxx::setup(int dataMB, int xLocal, int yLocal, int zLocal){
localSize[0] = localSize[1] = localSize[2] = 1;
Psize = 1048576*dataMB/(sizeof(double)*4);
MB = Psize * (sizeof(double)*4) / 1048576;
}
void dpCudaOxxxxx::init(){
//allocate local memory for original array
P = new double[4*Psize];
Fo = new cmplx[6*Psize];
if(!P || !Fo)
fprintf(stderr, "error in malloc\n");
generateArray(P, Psize);
dataParameters.push_back(Psize);
dataNames.push_back("nElements");
}
void dpCudaOxxxxx::memoryCopyOut(){
BEGIN
cudaErrChk( cudaMalloc((void **) &P_d, Psize*sizeof(double)*4 ));
cudaErrChk( cudaMalloc((void **) &Fo_d, Psize*sizeof(cmplx)*6 ));
cudaErrChk( cudaMemcpy(P_d, P, Psize*sizeof(double)*4, cudaMemcpyHostToDevice) );
END
}
void dpCudaOxxxxx::plan(){
BEGIN
blockSize = props.maxThreadsPerBlock;
lastBlock = 0;
nBlocks = Psize/blockSize; //nblocks = ceil(Psize/blockSize)
if (Psize%blockSize != 0)
nBlocks++;
if (nBlocks > 65535)
nBlocks = 65535;
nKernels = nBlocks / 65535;
if (nKernels == 0){
lastBlock = nBlocks; //run normally
}
else
lastBlock = nBlocks % 65535; //run repeated
END
}
int dpCudaOxxxxx::execute(){
cudaError_t err;
int stride = blockSize*nBlocks;
int lastStride = blockSize * lastBlock;
BEGIN
for (int i = 0; i < nKernels; i++){
Oxxxxx <<< nBlocks, blockSize >>> (P_d + (i*stride), Fo_d + (i*stride), Psize - (i*stride));
}
if (lastBlock != 0){
Oxxxxx <<<lastBlock, blockSize >>> (P_d + (nKernels*lastStride), Fo_d + (nKernels*lastStride), Psize - (nKernels*lastStride));
}
err = cudaPeekAtLastError();
cudaErrChk(err);
cudaErrChk(cudaDeviceSynchronize());
END
if(err!=cudaSuccess)
return -1;
return 0;
}
void dpCudaOxxxxx::memoryCopyIn(){
BEGIN
cudaErrChk(cudaMemcpy(Fo, Fo_d, Psize*sizeof(cmplx)*6, cudaMemcpyDeviceToHost));
END
}
void dpCudaOxxxxx::cleanUp(){
cudaFree(P_d);
cudaFree(Fo_d);
delete[] P;
delete[] Fo;
}
void dpCudaOxxxxx::generateArray(double *P, int N){
int i;
srand(time(NULL));
for (i=0; i < N - 4; i=i+4){
P[i+0]=rand() / (RAND_MAX/99999.9 + 1);
P[i+1]=rand() / (RAND_MAX/99999.9 + 1);
P[i+2]=rand() / (RAND_MAX/99999.9 + 1);
P[i+3]=rand() / (RAND_MAX/99999.9 + 1);
}
}
|
42a8578939fefbc4d830fba788ee88bd0dd80c03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/magma_zmconjugate.cu, normal z -> d, Thu Oct 8 23:05:49 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
__global__ void
magma_dmconjugate_kernel(
int num_rows,
magma_index_t *rowptr,
double *values )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i = rowptr[row]; i < rowptr[row+1]; i++){
values[i] = MAGMA_D_CONJ( values[i] );
}
}
}
/**
Purpose
-------
This function conjugates a matrix. For a real matrix, no value is changed.
Arguments
---------
@param[in,out]
A magma_d_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dmconjugate(
magma_d_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
dim3 grid( magma_ceildiv( A->num_rows, BLOCK_SIZE ));
hipLaunchKernelGGL(( magma_dmconjugate_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
A->num_rows, A->drow, A->dval );
return info;
}
| 42a8578939fefbc4d830fba788ee88bd0dd80c03.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/magma_zmconjugate.cu, normal z -> d, Thu Oct 8 23:05:49 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
__global__ void
magma_dmconjugate_kernel(
int num_rows,
magma_index_t *rowptr,
double *values )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i = rowptr[row]; i < rowptr[row+1]; i++){
values[i] = MAGMA_D_CONJ( values[i] );
}
}
}
/**
Purpose
-------
This function conjugates a matrix. For a real matrix, no value is changed.
Arguments
---------
@param[in,out]
A magma_d_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_daux
********************************************************************/
extern "C" magma_int_t
magma_dmconjugate(
magma_d_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
dim3 grid( magma_ceildiv( A->num_rows, BLOCK_SIZE ));
magma_dmconjugate_kernel<<< grid, BLOCK_SIZE, 0, queue->cuda_stream() >>>
( A->num_rows, A->drow, A->dval );
return info;
}
|
96d5e83260b05a418d90255c653d988bffef9104.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:ASSERTION_ERROR
//--warp-sync=32 --blockDim=32 --gridDim=1 --equality-abstraction --no-inline
__global__ void foo(int * A) {
A[0] = 1;
A[1] = 1;
A[2] = 1;
A[threadIdx.x] = 0;
__assert(A[0] == 1 | A[1] == 1 | A[2] == 1);
}
| 96d5e83260b05a418d90255c653d988bffef9104.cu | //xfail:ASSERTION_ERROR
//--warp-sync=32 --blockDim=32 --gridDim=1 --equality-abstraction --no-inline
__global__ void foo(int * A) {
A[0] = 1;
A[1] = 1;
A[2] = 1;
A[threadIdx.x] = 0;
__assert(A[0] == 1 | A[1] == 1 | A[2] == 1);
}
|
e34364da9a696152641ae09d22aa73794301ef5a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "../common.h"
int main(int argc, char **argv) {
int dev = 0;
hipSetDevice(dev);
unsigned int isize = 1 << 22;
unsigned int nbytes = isize * sizeof(float);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("%s starting at ", argv[0]);
printf("device %d: %s memory size %d nbytes %5.2fMB\n", dev, deviceProp.name, isize, nbytes / (1024.0f * 1024.0f));
float *h_a;
CHECK(hipHostMalloc(&h_a, nbytes));
float *d_a;
CHECK(hipMalloc((float**)&d_a, nbytes));
for (unsigned int i=0; i<isize; i++) h_a[i] = 0.5f;
hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice);
hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost);
hipFree(d_a);
hipHostFree(h_a);
hipDeviceReset();
return EXIT_SUCCESS;
} | e34364da9a696152641ae09d22aa73794301ef5a.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "../common.h"
int main(int argc, char **argv) {
int dev = 0;
cudaSetDevice(dev);
unsigned int isize = 1 << 22;
unsigned int nbytes = isize * sizeof(float);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("%s starting at ", argv[0]);
printf("device %d: %s memory size %d nbytes %5.2fMB\n", dev, deviceProp.name, isize, nbytes / (1024.0f * 1024.0f));
float *h_a;
CHECK(cudaMallocHost(&h_a, nbytes));
float *d_a;
CHECK(cudaMalloc((float**)&d_a, nbytes));
for (unsigned int i=0; i<isize; i++) h_a[i] = 0.5f;
cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFreeHost(h_a);
cudaDeviceReset();
return EXIT_SUCCESS;
} |
1fea4ac792086fcdb4dbfa1b5794d7395f48cba9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <max_pool.cuh>
__global__ void operator_max_pool_h(
const int nthreads, const float* const bottom_data, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data, float* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
// output location
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
// pooled range
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = fminf(hstart + kernel_h, height);
const int wend = fminf(wstart + kernel_w, width);
hstart = fmaxf(hstart, 0);
wstart = fmaxf(wstart, 0);
// get max value postion
float maxval = -FLT_MAX;
int maxidx = -1;
const float* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
// output
top_data[index] = maxval;
// record idx
mask[index] = maxidx;
}
}
void operator_max_pool(const Storage* inputs, Storage* mask, int kernel_h,
int kernel_w, int pad_h, int pad_w, int stride_h,
int stride_w, Storage* output) {
CHECK_EQ(inputs->get_shape().size(), 4,
"operator_max_pool: inputs shape error");
int batch_size = *(inputs->get_shape().rbegin() + 3);
int channels = *(inputs->get_shape().rbegin() + 2);
int height = *(inputs->get_shape().rbegin() + 1);
int width = *(inputs->get_shape().rbegin());
int pooled_height = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int pooled_width = (width + 2 * pad_w - kernel_w) / stride_w + 1;
const float* inputs_data_ptr = RAW_PTR(inputs->get_data());
float* outputs_data_ptr = RAW_PTR(output->get_data());
float* mask_data_ptr = RAW_PTR(mask->get_data());
int num_kernels = batch_size * channels * pooled_height * pooled_width;
int grid_size = ceil((float)num_kernels / BLOCK_SIZE);
hipLaunchKernelGGL(( operator_max_pool_h), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0,
num_kernels, inputs_data_ptr, channels, height, width, pooled_height,
pooled_width, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w,
outputs_data_ptr, mask_data_ptr);
CUDA_POST_KERNEL_CHECK;
}
__global__ void operator_d_max_pool_h(
const int nthreads, const float* const top_diff, const float* const mask,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, float* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
// pooled range
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = fminf((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = fminf((w + pad_w) / stride_w + 1, pooled_width);
float gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const float* const top_diff_slice = top_diff + offset;
// get max value idx
const float* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
bottom_diff[index] = gradient;
}
}
void operator_d_max_pool(const Storage* output_grads, const Storage* inputs,
const Storage* mask, int kernel_h, int kernel_w,
int pad_h, int pad_w, int stride_h, int stride_w,
Storage* inputs_grad) {
CHECK_EQ(output_grads->get_shape().size(), 4,
"operator_d_max_pool: output_grads shape error");
CHECK_EQ(inputs->get_shape().size(), 4,
"operator_d_max_pool: inputs shape error");
CHECK_EQ(mask->get_shape().size(), 4,
"operator_d_max_pool: mask shape error");
int batch_size = *(inputs->get_shape().rbegin() + 3);
int channels = *(inputs->get_shape().rbegin() + 2);
int height = *(inputs->get_shape().rbegin() + 1);
int width = *(inputs->get_shape().rbegin());
int pooled_height = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int pooled_width = (width + 2 * pad_w - kernel_w) / stride_w + 1;
const float* inputs_data_ptr = RAW_PTR(inputs->get_data());
const float* outputs_grad_ptr = RAW_PTR(output_grads->get_data());
const float* mask_data_ptr = RAW_PTR(mask->get_data());
float* inputs_grad_ptr = RAW_PTR(inputs_grad->get_data());
int num_kernels = batch_size * channels * height * width;
int grid_size = ceil((float)num_kernels / BLOCK_SIZE);
hipLaunchKernelGGL(( operator_d_max_pool_h), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0,
num_kernels, outputs_grad_ptr, mask_data_ptr, channels, height, width,
pooled_height, pooled_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, inputs_grad_ptr);
CUDA_POST_KERNEL_CHECK;
}
void MaxPool::forward() {
const Storage* input = this->pre->get_output();
int batch_size = *(input->get_shape().rbegin() + 3);
int channels = *(input->get_shape().rbegin() + 2);
int height = *(input->get_shape().rbegin() + 1);
int width = *(input->get_shape().rbegin());
int pooled_height = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int pooled_width = (width + 2 * pad_w - kernel_w) / stride_w + 1;
std::vector<int> output_shape{batch_size, channels, pooled_height,
pooled_width};
INIT_STORAGE(this->output, output_shape);
INIT_STORAGE(this->mask, output_shape);
operator_max_pool(input, this->mask.get(), this->kernel_h, this->kernel_w,
this->pad_h, this->pad_w, this->stride_h, this->stride_w,
this->output.get());
}
void MaxPool::backward() {
const Storage* input = this->pre->get_output();
const Storage* output_grad = this->next->get_grad();
INIT_STORAGE(this->grad, input->get_shape());
operator_d_max_pool(output_grad, input, this->mask.get(), this->kernel_h,
this->kernel_w, this->pad_h, this->pad_w, this->stride_h,
this->stride_w, this->grad.get());
} | 1fea4ac792086fcdb4dbfa1b5794d7395f48cba9.cu | #include <max_pool.cuh>
__global__ void operator_max_pool_h(
const int nthreads, const float* const bottom_data, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
float* const top_data, float* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
// output location
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
// pooled range
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = fminf(hstart + kernel_h, height);
const int wend = fminf(wstart + kernel_w, width);
hstart = fmaxf(hstart, 0);
wstart = fmaxf(wstart, 0);
// get max value postion
float maxval = -FLT_MAX;
int maxidx = -1;
const float* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
// output
top_data[index] = maxval;
// record idx
mask[index] = maxidx;
}
}
void operator_max_pool(const Storage* inputs, Storage* mask, int kernel_h,
int kernel_w, int pad_h, int pad_w, int stride_h,
int stride_w, Storage* output) {
CHECK_EQ(inputs->get_shape().size(), 4,
"operator_max_pool: inputs shape error");
int batch_size = *(inputs->get_shape().rbegin() + 3);
int channels = *(inputs->get_shape().rbegin() + 2);
int height = *(inputs->get_shape().rbegin() + 1);
int width = *(inputs->get_shape().rbegin());
int pooled_height = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int pooled_width = (width + 2 * pad_w - kernel_w) / stride_w + 1;
const float* inputs_data_ptr = RAW_PTR(inputs->get_data());
float* outputs_data_ptr = RAW_PTR(output->get_data());
float* mask_data_ptr = RAW_PTR(mask->get_data());
int num_kernels = batch_size * channels * pooled_height * pooled_width;
int grid_size = ceil((float)num_kernels / BLOCK_SIZE);
operator_max_pool_h<<<grid_size, BLOCK_SIZE>>>(
num_kernels, inputs_data_ptr, channels, height, width, pooled_height,
pooled_width, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w,
outputs_data_ptr, mask_data_ptr);
CUDA_POST_KERNEL_CHECK;
}
__global__ void operator_d_max_pool_h(
const int nthreads, const float* const top_diff, const float* const mask,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, float* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
// pooled range
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = fminf((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = fminf((w + pad_w) / stride_w + 1, pooled_width);
float gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const float* const top_diff_slice = top_diff + offset;
// get max value idx
const float* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
bottom_diff[index] = gradient;
}
}
void operator_d_max_pool(const Storage* output_grads, const Storage* inputs,
const Storage* mask, int kernel_h, int kernel_w,
int pad_h, int pad_w, int stride_h, int stride_w,
Storage* inputs_grad) {
CHECK_EQ(output_grads->get_shape().size(), 4,
"operator_d_max_pool: output_grads shape error");
CHECK_EQ(inputs->get_shape().size(), 4,
"operator_d_max_pool: inputs shape error");
CHECK_EQ(mask->get_shape().size(), 4,
"operator_d_max_pool: mask shape error");
int batch_size = *(inputs->get_shape().rbegin() + 3);
int channels = *(inputs->get_shape().rbegin() + 2);
int height = *(inputs->get_shape().rbegin() + 1);
int width = *(inputs->get_shape().rbegin());
int pooled_height = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int pooled_width = (width + 2 * pad_w - kernel_w) / stride_w + 1;
const float* inputs_data_ptr = RAW_PTR(inputs->get_data());
const float* outputs_grad_ptr = RAW_PTR(output_grads->get_data());
const float* mask_data_ptr = RAW_PTR(mask->get_data());
float* inputs_grad_ptr = RAW_PTR(inputs_grad->get_data());
int num_kernels = batch_size * channels * height * width;
int grid_size = ceil((float)num_kernels / BLOCK_SIZE);
operator_d_max_pool_h<<<grid_size, BLOCK_SIZE>>>(
num_kernels, outputs_grad_ptr, mask_data_ptr, channels, height, width,
pooled_height, pooled_width, kernel_h, kernel_w, stride_h, stride_w,
pad_h, pad_w, inputs_grad_ptr);
CUDA_POST_KERNEL_CHECK;
}
void MaxPool::forward() {
const Storage* input = this->pre->get_output();
int batch_size = *(input->get_shape().rbegin() + 3);
int channels = *(input->get_shape().rbegin() + 2);
int height = *(input->get_shape().rbegin() + 1);
int width = *(input->get_shape().rbegin());
int pooled_height = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int pooled_width = (width + 2 * pad_w - kernel_w) / stride_w + 1;
std::vector<int> output_shape{batch_size, channels, pooled_height,
pooled_width};
INIT_STORAGE(this->output, output_shape);
INIT_STORAGE(this->mask, output_shape);
operator_max_pool(input, this->mask.get(), this->kernel_h, this->kernel_w,
this->pad_h, this->pad_w, this->stride_h, this->stride_w,
this->output.get());
}
void MaxPool::backward() {
const Storage* input = this->pre->get_output();
const Storage* output_grad = this->next->get_grad();
INIT_STORAGE(this->grad, input->get_shape());
operator_d_max_pool(output_grad, input, this->mask.get(), this->kernel_h,
this->kernel_w, this->pad_h, this->pad_w, this->stride_h,
this->stride_w, this->grad.get());
} |
5ba552beececa04270bdde3b4a7d7083d7f59f0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define WIDTH ${width}
#define HEIGHT ${height}
#define NLM_A ${a}
#define NLM_S ${s}
#define NLM_H ((float) (${h} / 79.636080791869483631941455867052))
#define NLM_H2 ((float) (${h2} / 79.636080791869483631941455867052))
#define GET(pointer, y0, x0) pointer[max(min((y0), HEIGHT-1), 0) * WIDTH + max(min((x0), WIDTH-1), 0)]
#define PatchMatrix(y0, x0) GET(srcp, y-NLM_A-NLM_S + (y0) / (2*NLM_A+1) + (x0) / (2*NLM_S+1), x-NLM_A-NLM_S + (y0) % (2*NLM_A+1) + (x0) % (2*NLM_S+1))
#define Square(x) ((x) * (x))
#define PatchSize Square(2 * NLM_S + 1)
#define SearchSize Square(2 * NLM_A + 1)
extern "C" __global__
void compute(const float * __restrict__ srcp, float * __restrict__ dstp) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= WIDTH || y >= HEIGHT)
return;
float PatchWeights[SearchSize];
// CalculatePatchWeights
float NormalizingConstant = 0.f;
for (int i = 0; i < SearchSize; i++) {
float SSE = 0.f;
for (int j = 0; j < PatchSize; j++)
SSE += Square(PatchMatrix(i, j) - PatchMatrix(SearchSize / 2, j));
float Weight = expf(-SSE / Square(NLM_H));
PatchWeights[i] = Weight;
NormalizingConstant += Weight;
}
for (int i = 0; i < SearchSize; i++) {
PatchWeights[i] /= NormalizingConstant;
}
// CalculatePositionWeights & Aggregate
float Result = 0.f;
NormalizingConstant = 0.f;
for (int j = 0; j < PatchSize; j++) {
float SSE = 0.f;
for (int i = 0; i < SearchSize; i++)
SSE += PatchWeights[i] * Square(PatchMatrix(i, j) - PatchMatrix(i, PatchSize / 2));
float Weight = expf(-SSE / Square(NLM_H2));
Result += Weight * PatchMatrix(SearchSize / 2, j);
NormalizingConstant += Weight;
}
GET(dstp, y, x) = Result / NormalizingConstant;
}
| 5ba552beececa04270bdde3b4a7d7083d7f59f0e.cu | #define WIDTH ${width}
#define HEIGHT ${height}
#define NLM_A ${a}
#define NLM_S ${s}
#define NLM_H ((float) (${h} / 79.636080791869483631941455867052))
#define NLM_H2 ((float) (${h2} / 79.636080791869483631941455867052))
#define GET(pointer, y0, x0) pointer[max(min((y0), HEIGHT-1), 0) * WIDTH + max(min((x0), WIDTH-1), 0)]
#define PatchMatrix(y0, x0) GET(srcp, y-NLM_A-NLM_S + (y0) / (2*NLM_A+1) + (x0) / (2*NLM_S+1), x-NLM_A-NLM_S + (y0) % (2*NLM_A+1) + (x0) % (2*NLM_S+1))
#define Square(x) ((x) * (x))
#define PatchSize Square(2 * NLM_S + 1)
#define SearchSize Square(2 * NLM_A + 1)
extern "C" __global__
void compute(const float * __restrict__ srcp, float * __restrict__ dstp) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x >= WIDTH || y >= HEIGHT)
return;
float PatchWeights[SearchSize];
// CalculatePatchWeights
float NormalizingConstant = 0.f;
for (int i = 0; i < SearchSize; i++) {
float SSE = 0.f;
for (int j = 0; j < PatchSize; j++)
SSE += Square(PatchMatrix(i, j) - PatchMatrix(SearchSize / 2, j));
float Weight = expf(-SSE / Square(NLM_H));
PatchWeights[i] = Weight;
NormalizingConstant += Weight;
}
for (int i = 0; i < SearchSize; i++) {
PatchWeights[i] /= NormalizingConstant;
}
// CalculatePositionWeights & Aggregate
float Result = 0.f;
NormalizingConstant = 0.f;
for (int j = 0; j < PatchSize; j++) {
float SSE = 0.f;
for (int i = 0; i < SearchSize; i++)
SSE += PatchWeights[i] * Square(PatchMatrix(i, j) - PatchMatrix(i, PatchSize / 2));
float Weight = expf(-SSE / Square(NLM_H2));
Result += Weight * PatchMatrix(SearchSize / 2, j);
NormalizingConstant += Weight;
}
GET(dstp, y, x) = Result / NormalizingConstant;
}
|
d55ccdcb06576d68113deb7b850654a1a9eadb3a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* nvcc -arch=compute_20 -code="sm_20,compute_20" -o smid smid.cu
* ./smid 20
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
//#include <hip/hip_runtime.h>
/* E.D. Riedijk */
__device__ uint get_smid(void) {
uint ret;
asm("mov.u32 %0, %smid;" : "=r"(ret) );
return ret;
}
__global__ void kern(int *sm){
if (threadIdx.x==0)
sm[blockIdx.x]=get_smid();
}
int main(int argc, char *argv[]){
int N = atoi(argv[1]);
int *sm, *sm_d;
sm = (int *) malloc(N*sizeof(*sm));
hipMalloc((void**)&sm_d,N*sizeof(*sm_d));
hipLaunchKernelGGL(( kern), dim3(N),dim3(N), 0, 0, sm_d);
hipMemcpy(sm, sm_d, N*sizeof(int), hipMemcpyDeviceToHost);
for (int i=0;i<N;i++)
printf("%d %d\n",i,sm[i]);
return 0;
}
| d55ccdcb06576d68113deb7b850654a1a9eadb3a.cu | /*
* nvcc -arch=compute_20 -code="sm_20,compute_20" -o smid smid.cu
* ./smid 20
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
//#include <cuda_runtime.h>
/* E.D. Riedijk */
__device__ uint get_smid(void) {
uint ret;
asm("mov.u32 %0, %smid;" : "=r"(ret) );
return ret;
}
__global__ void kern(int *sm){
if (threadIdx.x==0)
sm[blockIdx.x]=get_smid();
}
int main(int argc, char *argv[]){
int N = atoi(argv[1]);
int *sm, *sm_d;
sm = (int *) malloc(N*sizeof(*sm));
cudaMalloc((void**)&sm_d,N*sizeof(*sm_d));
kern<<<N,N>>>( sm_d);
cudaMemcpy(sm, sm_d, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i=0;i<N;i++)
printf("%d %d\n",i,sm[i]);
return 0;
}
|
04abf543ca67097864d1d5b525363598cb3dd6b5.hip | // !!! This is a file automatically generated by hipify!!!
/*
Collatz code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <algorithm>
#include <sys/time.h>
#include <hip/hip_runtime.h>
static const int ThreadsPerBlock = 512;
static __global__ void collatzKernel(const long range, int* maxlen)
{
// compute sequence lengths
const long idx = threadIdx.x + blockIdx.x * (long)blockDim.x;
long val = idx + 1;
int len = 1;
if(idx < range) {
while (val != 1) {
len++;
if ((val % 2) == 0) {
val = val / 2; // even
} else {
val = 3 * val + 1; // odd
}
}
}
if(*maxlen < len)
atomicMax(maxlen, len);
}
static void CheckCuda()
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "CUDA error %d: %s\n", e, hipGetErrorString(e));
exit(-1);
}
}
int main(int argc, char *argv[])
{
printf("Collatz v1.1\n");
// check command line
if (argc != 2) {fprintf(stderr, "USAGE: %s range\n", argv[0]); exit(-1);}
const long range = atol(argv[1]);
if (range < 3) {fprintf(stderr, "ERROR: range must be at least 3\n"); exit(-1);}
printf("range bound: %ld\n", range);
//allocate space for device copies
int* dev_maxlen;
const int size = sizeof(int);
hipMalloc((void **)&dev_maxlen, size);
//allocate space for host copies
int* host_maxlen = new int;
*host_maxlen = 0;
if(hipSuccess != hipMemcpy(dev_maxlen, host_maxlen, size, hipMemcpyHostToDevice)) {
fprintf(stderr, "copying to device falied\n");
exit(-1);
}
// start time
timeval start, end;
gettimeofday(&start, NULL);
// call kernel
hipLaunchKernelGGL(( collatzKernel) , dim3((ThreadsPerBlock + range - 1)/ThreadsPerBlock),dim3(ThreadsPerBlock), 0, 0, range, dev_maxlen);
hipDeviceSynchronize();
// end time
gettimeofday(&end, NULL);
const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("compute time: %.4f s\n", runtime);
CheckCuda();
if(hipSuccess != hipMemcpy(host_maxlen, dev_maxlen, size, hipMemcpyDeviceToHost)) {
fprintf(stderr, "copying from device failed\n");
exit(-1);
}
// print result
printf("longest sequence: %d elements\n", *host_maxlen);
delete host_maxlen;
hipFree(dev_maxlen);
return 0;
}
| 04abf543ca67097864d1d5b525363598cb3dd6b5.cu | /*
Collatz code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <algorithm>
#include <sys/time.h>
#include <cuda.h>
static const int ThreadsPerBlock = 512;
static __global__ void collatzKernel(const long range, int* maxlen)
{
// compute sequence lengths
const long idx = threadIdx.x + blockIdx.x * (long)blockDim.x;
long val = idx + 1;
int len = 1;
if(idx < range) {
while (val != 1) {
len++;
if ((val % 2) == 0) {
val = val / 2; // even
} else {
val = 3 * val + 1; // odd
}
}
}
if(*maxlen < len)
atomicMax(maxlen, len);
}
static void CheckCuda()
{
cudaError_t e;
cudaDeviceSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e));
exit(-1);
}
}
int main(int argc, char *argv[])
{
printf("Collatz v1.1\n");
// check command line
if (argc != 2) {fprintf(stderr, "USAGE: %s range\n", argv[0]); exit(-1);}
const long range = atol(argv[1]);
if (range < 3) {fprintf(stderr, "ERROR: range must be at least 3\n"); exit(-1);}
printf("range bound: %ld\n", range);
//allocate space for device copies
int* dev_maxlen;
const int size = sizeof(int);
cudaMalloc((void **)&dev_maxlen, size);
//allocate space for host copies
int* host_maxlen = new int;
*host_maxlen = 0;
if(cudaSuccess != cudaMemcpy(dev_maxlen, host_maxlen, size, cudaMemcpyHostToDevice)) {
fprintf(stderr, "copying to device falied\n");
exit(-1);
}
// start time
timeval start, end;
gettimeofday(&start, NULL);
// call kernel
collatzKernel <<<(ThreadsPerBlock + range - 1)/ThreadsPerBlock,ThreadsPerBlock>>>(range, dev_maxlen);
cudaDeviceSynchronize();
// end time
gettimeofday(&end, NULL);
const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("compute time: %.4f s\n", runtime);
CheckCuda();
if(cudaSuccess != cudaMemcpy(host_maxlen, dev_maxlen, size, cudaMemcpyDeviceToHost)) {
fprintf(stderr, "copying from device failed\n");
exit(-1);
}
// print result
printf("longest sequence: %d elements\n", *host_maxlen);
delete host_maxlen;
cudaFree(dev_maxlen);
return 0;
}
|
c5d228d76d09f3e930a0ac0587f24a8018895396.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thread> // NOLINT
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device_context.h"
namespace paddle {
namespace memory {
const int NUM_STREAMS = 8;
const int N = 2;
const float DELTA = 1e-1;
using CudaDevCtxVec = std::vector<std::unique_ptr<platform::CUDADeviceContext>>;
__global__ void kernel(float *x, int n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = 3.14159 * i;
}
}
void CheckKernelOutput(float *x, int n) {
auto host_x = std::unique_ptr<float[]>(new float[n]);
for (int i = 0; i < n; ++i) {
EXPECT_TRUE(hipSuccess == hipMemcpy(host_x.get(), x, n * sizeof(float),
hipMemcpyDeviceToHost));
EXPECT_GE(host_x[i] + DELTA, 3.14159f * i);
EXPECT_LE(host_x[i] - DELTA, 3.14159f * i);
}
}
void MultiStreamCompute(float **data, float **second_data,
const platform::CUDADeviceContext &ctx) {
// multi-streams
AllocationPtr allocation_ptr = Alloc(ctx, N * sizeof(float));
EXPECT_GE(allocation_ptr->size(), N * sizeof(float));
*data = reinterpret_cast<float *>(allocation_ptr->ptr());
hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, ctx.stream(), *data, N);
// allocate and compute on same stream again
allocation_ptr = Alloc(ctx, N * sizeof(float));
EXPECT_GE(allocation_ptr->size(), N * sizeof(float));
*second_data = reinterpret_cast<float *>(allocation_ptr->ptr());
hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, ctx.stream(), *second_data, N);
}
TEST(Malloc, CUDADeviceContextMultiStream) {
auto place = platform::CUDAPlace(0);
EXPECT_TRUE(hipSuccess == hipSetDevice(0));
AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float));
EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float));
float *main_stream_data =
reinterpret_cast<float *>(main_stream_alloc_ptr->ptr());
float *data[NUM_STREAMS];
float *second_data[NUM_STREAMS];
CudaDevCtxVec dev_ctx;
// default stream
hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, 0, main_stream_data, N);
main_stream_alloc_ptr.reset();
for (int i = 0; i < NUM_STREAMS; ++i) {
dev_ctx.push_back(std::unique_ptr<platform::CUDADeviceContext>(
new platform::CUDADeviceContext(place)));
MultiStreamCompute(&data[i], &second_data[i], *dev_ctx[i]);
}
EXPECT_TRUE(hipSuccess == hipDeviceSynchronize());
for (int i = 0; i < NUM_STREAMS; ++i) {
CheckKernelOutput(data[i], N);
CheckKernelOutput(second_data[i], N);
}
}
TEST(Malloc, CUDADeviceContextMultiThreadMultiStream) {
auto place = platform::CUDAPlace(0);
EXPECT_TRUE(hipSuccess == hipSetDevice(0));
AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float));
EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float));
float *main_stream_data =
reinterpret_cast<float *>(main_stream_alloc_ptr->ptr());
float *data[NUM_STREAMS];
float *second_data[NUM_STREAMS];
CudaDevCtxVec dev_ctx;
std::vector<std::thread> threads;
// default stream
hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, 0, main_stream_data, N);
main_stream_alloc_ptr.reset();
for (int i = 0; i < NUM_STREAMS; ++i) {
dev_ctx.push_back(std::unique_ptr<platform::CUDADeviceContext>(
new platform::CUDADeviceContext(place)));
threads.push_back(std::thread(MultiStreamCompute, &data[i], &second_data[i],
std::cref(*dev_ctx[i])));
}
for (int i = 0; i < NUM_STREAMS; ++i) {
threads[i].join();
}
EXPECT_TRUE(hipSuccess == hipDeviceSynchronize());
for (int i = 0; i < NUM_STREAMS; ++i) {
CheckKernelOutput(data[i], N);
CheckKernelOutput(second_data[i], N);
}
}
TEST(Malloc, AllocZero) {
auto place = platform::CUDAPlace(0);
AllocationPtr allocation_ptr = Alloc(place, 0);
EXPECT_GE(allocation_ptr->size(), 0);
}
} // namespace memory
} // namespace paddle
| c5d228d76d09f3e930a0ac0587f24a8018895396.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda.h>
#include <cuda_runtime.h>
#include <thread> // NOLINT
#include <vector>
#include "gtest/gtest.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device_context.h"
namespace paddle {
namespace memory {
const int NUM_STREAMS = 8;
const int N = 2;
const float DELTA = 1e-1;
using CudaDevCtxVec = std::vector<std::unique_ptr<platform::CUDADeviceContext>>;
__global__ void kernel(float *x, int n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = 3.14159 * i;
}
}
void CheckKernelOutput(float *x, int n) {
auto host_x = std::unique_ptr<float[]>(new float[n]);
for (int i = 0; i < n; ++i) {
EXPECT_TRUE(cudaSuccess == cudaMemcpy(host_x.get(), x, n * sizeof(float),
cudaMemcpyDeviceToHost));
EXPECT_GE(host_x[i] + DELTA, 3.14159f * i);
EXPECT_LE(host_x[i] - DELTA, 3.14159f * i);
}
}
void MultiStreamCompute(float **data, float **second_data,
const platform::CUDADeviceContext &ctx) {
// multi-streams
AllocationPtr allocation_ptr = Alloc(ctx, N * sizeof(float));
EXPECT_GE(allocation_ptr->size(), N * sizeof(float));
*data = reinterpret_cast<float *>(allocation_ptr->ptr());
kernel<<<1, 64, 0, ctx.stream()>>>(*data, N);
// allocate and compute on same stream again
allocation_ptr = Alloc(ctx, N * sizeof(float));
EXPECT_GE(allocation_ptr->size(), N * sizeof(float));
*second_data = reinterpret_cast<float *>(allocation_ptr->ptr());
kernel<<<1, 64, 0, ctx.stream()>>>(*second_data, N);
}
TEST(Malloc, CUDADeviceContextMultiStream) {
auto place = platform::CUDAPlace(0);
EXPECT_TRUE(cudaSuccess == cudaSetDevice(0));
AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float));
EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float));
float *main_stream_data =
reinterpret_cast<float *>(main_stream_alloc_ptr->ptr());
float *data[NUM_STREAMS];
float *second_data[NUM_STREAMS];
CudaDevCtxVec dev_ctx;
// default stream
kernel<<<1, 64>>>(main_stream_data, N);
main_stream_alloc_ptr.reset();
for (int i = 0; i < NUM_STREAMS; ++i) {
dev_ctx.push_back(std::unique_ptr<platform::CUDADeviceContext>(
new platform::CUDADeviceContext(place)));
MultiStreamCompute(&data[i], &second_data[i], *dev_ctx[i]);
}
EXPECT_TRUE(cudaSuccess == cudaDeviceSynchronize());
for (int i = 0; i < NUM_STREAMS; ++i) {
CheckKernelOutput(data[i], N);
CheckKernelOutput(second_data[i], N);
}
}
TEST(Malloc, CUDADeviceContextMultiThreadMultiStream) {
auto place = platform::CUDAPlace(0);
EXPECT_TRUE(cudaSuccess == cudaSetDevice(0));
AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float));
EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float));
float *main_stream_data =
reinterpret_cast<float *>(main_stream_alloc_ptr->ptr());
float *data[NUM_STREAMS];
float *second_data[NUM_STREAMS];
CudaDevCtxVec dev_ctx;
std::vector<std::thread> threads;
// default stream
kernel<<<1, 64>>>(main_stream_data, N);
main_stream_alloc_ptr.reset();
for (int i = 0; i < NUM_STREAMS; ++i) {
dev_ctx.push_back(std::unique_ptr<platform::CUDADeviceContext>(
new platform::CUDADeviceContext(place)));
threads.push_back(std::thread(MultiStreamCompute, &data[i], &second_data[i],
std::cref(*dev_ctx[i])));
}
for (int i = 0; i < NUM_STREAMS; ++i) {
threads[i].join();
}
EXPECT_TRUE(cudaSuccess == cudaDeviceSynchronize());
for (int i = 0; i < NUM_STREAMS; ++i) {
CheckKernelOutput(data[i], N);
CheckKernelOutput(second_data[i], N);
}
}
TEST(Malloc, AllocZero) {
auto place = platform::CUDAPlace(0);
AllocationPtr allocation_ptr = Alloc(place, 0);
EXPECT_GE(allocation_ptr->size(), 0);
}
} // namespace memory
} // namespace paddle
|
b88a6c7cd31c178eb6d1eb33bdf85bf115e28072.hip | // !!! This is a file automatically generated by hipify!!!
#include "protonPreconditioner.h"
#include "vector_td_utilities.h"
#include "cuCgSolver.h"
#include "vector_td.h"
#include "cuNDFFT.h"
#include "radial_utilities.h"
#include "cuNFFT.h"
#include "cuNFFTOperator.h"
#include "cuNDArray_math.h"
#include "hoNDArray_fileio.h"
#include <thrust/sort.h>
#include <thrust/transform.h>
using namespace Gadgetron;
void protonPreconditioner::apply(cuNDArray<float> * in, cuNDArray<float> * out){
boost::shared_ptr<cuNDArray<float_complext> > complex_in =real_to_complex<float_complext>(in);
uint64d2 dims = from_std_vector<size_t,2>(*in->get_dimensions());
complex_in = pad<float_complext,2>(dims*size_t(2),complex_in.get());
cuNDFFT<float>::instance()->fft(complex_in.get());
*complex_in *= *kernel_;
cuNDFFT<float>::instance()->ifft(complex_in.get());
*out = *crop<float,2>(dims/size_t(2),dims,real<float_complext>(complex_in.get()).get());
if (hull_.get()) *out *= *hull_;
}
static float find_percentile(cuNDArray<float_complext>* arr,float fraction){
boost::shared_ptr<cuNDArray<float> > absarr = abs(arr);
thrust::sort(absarr->begin(),absarr->end());
return absarr->at((size_t)(absarr->get_number_of_elements()*fraction));
}
struct precon_cutoff : public thrust::unary_function<float_complext,float_complext>
{
precon_cutoff(float cutoff){
_cutoff = cutoff;
}
__device__ float_complext operator()(const float_complext &x) const {
float ax = abs(x);
//if (ax < _cutoff) return x*exp(-(_cutoff-ax)*(_cutoff-ax)/(_cutoff*_cutoff));
if (ax < _cutoff) return float_complext(0);
else return x;
}
float _cutoff;
};
boost::shared_ptr<cuNDArray<float_complext> > protonPreconditioner::calcKernel(uint64d2 dims, int angles){
boost::shared_ptr< cuNDArray<floatd2> > traj =
compute_radial_trajectory_fixed_angle_2d<float>(dims[0],angles,1);
boost::shared_ptr< cuNDArray<float> > dcw =
compute_radial_dcw_fixed_angle_2d<float>(dims[0],angles,2.0f,1.0f);
cuCgSolver<float_complext> solver;
solver.set_output_mode( cuCgSolver<float>::OUTPUT_VERBOSE );
boost::shared_ptr<cuNFFTOperator<float,2> > E (new cuNFFTOperator<float,2>);
E->setup( uint64d2(dims[0], dims[1]),
uint64d2(dims[0], dims[1])<<1, // !! <-- alpha_
5.5f );
E->set_dcw( dcw );
E->preprocess( traj.get() );
std::vector<size_t> data_dims;
data_dims.push_back(dims[0]);
data_dims.push_back(dims[1]);
std::vector<size_t > kernel_size;
kernel_size.push_back(dims[0]);
hoNDArray<float_complext> kernel(kernel_size);
float A2 = dims[0]*dims[0]/4;
for (size_t i = 0; i < dims[0]/2; i++)
kernel[i] = (dims[0]/2-float(i))/float(dims[0]/2);
for (size_t i = 0; i < dims[0]/2; i++)
kernel[i+dims[0]/2] = (float(i))/float(dims[0]/2);
/*
for (size_t k = 0; k < dims[0]/2; k++)
kernel[dims[0]/2-k-1] = k*A2/(A2-k*k)*::exp(-A2/(A2-k*k));
for (size_t k = 0; k < dims[0]/2; k++)
kernel[dims[0]-k-1] = kernel[k];
*/
cuNDArray<float_complext> cu_kernel(kernel);
boost::shared_ptr<cuNDArray<float_complext> > ekernel = expand(&cu_kernel,angles);
boost::shared_ptr< hoNDArray<float> > host_kernel = abs(ekernel.get())->to_host();
write_nd_array<float>( host_kernel.get(), "filter.real" );
E->set_domain_dimensions(&data_dims);
E->set_codomain_dimensions(ekernel->get_dimensions().get());
solver.set_encoding_operator(E);
solver.set_max_iterations(30);
solver.set_tc_tolerance(1e-5);
boost::shared_ptr<cuNDArray<float_complext> > result = solver.solve(ekernel.get());
boost::shared_ptr< hoNDArray<float> > host_norm = abs(result.get())->to_host();
write_nd_array<float>( host_norm.get(), "kernel.real" );
result = pad<float_complext,2>(dims*size_t(2),result.get());
cuNDFFT<float>::instance()->fft(result.get());
float cutoff = find_percentile(result.get(),0.05);
std::cout << "Cutoff: " << cutoff << std::endl;
//thrust::transform(result->begin(),result->end(),result->begin(),precon_cutoff(cutoff));
sqrt_inplace(result.get());
return result;
}
| b88a6c7cd31c178eb6d1eb33bdf85bf115e28072.cu | #include "protonPreconditioner.h"
#include "vector_td_utilities.h"
#include "cuCgSolver.h"
#include "vector_td.h"
#include "cuNDFFT.h"
#include "radial_utilities.h"
#include "cuNFFT.h"
#include "cuNFFTOperator.h"
#include "cuNDArray_math.h"
#include "hoNDArray_fileio.h"
#include <thrust/sort.h>
#include <thrust/transform.h>
using namespace Gadgetron;
void protonPreconditioner::apply(cuNDArray<float> * in, cuNDArray<float> * out){
boost::shared_ptr<cuNDArray<float_complext> > complex_in =real_to_complex<float_complext>(in);
uint64d2 dims = from_std_vector<size_t,2>(*in->get_dimensions());
complex_in = pad<float_complext,2>(dims*size_t(2),complex_in.get());
cuNDFFT<float>::instance()->fft(complex_in.get());
*complex_in *= *kernel_;
cuNDFFT<float>::instance()->ifft(complex_in.get());
*out = *crop<float,2>(dims/size_t(2),dims,real<float_complext>(complex_in.get()).get());
if (hull_.get()) *out *= *hull_;
}
static float find_percentile(cuNDArray<float_complext>* arr,float fraction){
boost::shared_ptr<cuNDArray<float> > absarr = abs(arr);
thrust::sort(absarr->begin(),absarr->end());
return absarr->at((size_t)(absarr->get_number_of_elements()*fraction));
}
struct precon_cutoff : public thrust::unary_function<float_complext,float_complext>
{
precon_cutoff(float cutoff){
_cutoff = cutoff;
}
__device__ float_complext operator()(const float_complext &x) const {
float ax = abs(x);
//if (ax < _cutoff) return x*exp(-(_cutoff-ax)*(_cutoff-ax)/(_cutoff*_cutoff));
if (ax < _cutoff) return float_complext(0);
else return x;
}
float _cutoff;
};
boost::shared_ptr<cuNDArray<float_complext> > protonPreconditioner::calcKernel(uint64d2 dims, int angles){
boost::shared_ptr< cuNDArray<floatd2> > traj =
compute_radial_trajectory_fixed_angle_2d<float>(dims[0],angles,1);
boost::shared_ptr< cuNDArray<float> > dcw =
compute_radial_dcw_fixed_angle_2d<float>(dims[0],angles,2.0f,1.0f);
cuCgSolver<float_complext> solver;
solver.set_output_mode( cuCgSolver<float>::OUTPUT_VERBOSE );
boost::shared_ptr<cuNFFTOperator<float,2> > E (new cuNFFTOperator<float,2>);
E->setup( uint64d2(dims[0], dims[1]),
uint64d2(dims[0], dims[1])<<1, // !! <-- alpha_
5.5f );
E->set_dcw( dcw );
E->preprocess( traj.get() );
std::vector<size_t> data_dims;
data_dims.push_back(dims[0]);
data_dims.push_back(dims[1]);
std::vector<size_t > kernel_size;
kernel_size.push_back(dims[0]);
hoNDArray<float_complext> kernel(kernel_size);
float A2 = dims[0]*dims[0]/4;
for (size_t i = 0; i < dims[0]/2; i++)
kernel[i] = (dims[0]/2-float(i))/float(dims[0]/2);
for (size_t i = 0; i < dims[0]/2; i++)
kernel[i+dims[0]/2] = (float(i))/float(dims[0]/2);
/*
for (size_t k = 0; k < dims[0]/2; k++)
kernel[dims[0]/2-k-1] = k*A2/(A2-k*k)*std::exp(-A2/(A2-k*k));
for (size_t k = 0; k < dims[0]/2; k++)
kernel[dims[0]-k-1] = kernel[k];
*/
cuNDArray<float_complext> cu_kernel(kernel);
boost::shared_ptr<cuNDArray<float_complext> > ekernel = expand(&cu_kernel,angles);
boost::shared_ptr< hoNDArray<float> > host_kernel = abs(ekernel.get())->to_host();
write_nd_array<float>( host_kernel.get(), "filter.real" );
E->set_domain_dimensions(&data_dims);
E->set_codomain_dimensions(ekernel->get_dimensions().get());
solver.set_encoding_operator(E);
solver.set_max_iterations(30);
solver.set_tc_tolerance(1e-5);
boost::shared_ptr<cuNDArray<float_complext> > result = solver.solve(ekernel.get());
boost::shared_ptr< hoNDArray<float> > host_norm = abs(result.get())->to_host();
write_nd_array<float>( host_norm.get(), "kernel.real" );
result = pad<float_complext,2>(dims*size_t(2),result.get());
cuNDFFT<float>::instance()->fft(result.get());
float cutoff = find_percentile(result.get(),0.05);
std::cout << "Cutoff: " << cutoff << std::endl;
//thrust::transform(result->begin(),result->end(),result->begin(),precon_cutoff(cutoff));
sqrt_inplace(result.get());
return result;
}
|
0a1d636461d2d6954509c26418756d8093a34322.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//********************************************************************************************************************************************************//
//*********************************************** Proton CT Preprocessing and Image Reconstruction Code *************************************************//
//********************************************************************************************************************************************************//
#include "pCT_Reconstruction.h"
//********************************************************************************************************************************************************//
//********************************************************************** Host Code ***********************************************************************//
//********************************************************************************************************************************************************//
// Preprocessing setup and initializations
void assign_SSD_positions();
void initializations();
void count_histories();
void count_histories_old();
void count_histories_v0();
void count_histories_v1();
void reserve_vector_capacity();
// Preprocessing routines
void read_data_chunk( const int, const int, const int );
void read_data_chunk_old( const int, const int, const int );
void read_data_chunk_v0( const int, const int, const int );
void read_data_chunk_v1( const int, const int, const int );
void recon_volume_intersections( const int );
void bin_valid_histories( const int );
void calculate_means();
void sum_squared_deviations( const int, const int );
void calculate_standard_deviations();
void statistical_cuts( const int, const int );
void initialize_sinogram();
void construct_sinogram();
void filter();
void backprojection();
// Hull-Detection
void hull_detection_initializations();
void hull_detection( int );
void hull_detection_finish();
void initialize_SC_hull( bool*&, bool*& );
void initialize_MSC_hull( int*&, int*& );
void initialize_SM_hull( int*&, int*& );
void initialize_float_image( float*&, float*& );
void SC( int );
void MSC( int );
void MSC_threshold();
void SM( int );
void SM_threshold();
void SM_threshold_2();
void averaging_filter( bool*&, bool*&, const int);
// MLP: IN DEVELOPMENT
void create_MLP_test_image();
void MLP_test();
void MLP();
void MLP_entry_exit( int&, int&, int& );
float mean_chord_length( float, float );
// Write arrays/vectors to file(s)
template<typename T> void write_array_to_disk( char*, const char*, const char*, T*, const int, const int, const int, const int, const bool );
template<typename T> void write_vector_to_disk( char*, const char*, const char*, vector<T>, const int, const int, const int, const bool );
// Memory transfers and allocations/deallocations
void post_cut_memory_clean();
void resize_vectors( const int );
void shrink_vectors( const int );
void initial_processing_memory_clean();
// Helper Functions
bool bad_data_angle( const int );
int calculate_x_voxel( const float, const int, const float );
int calculate_y_voxel( const float, const int, const float );
int calculate_slice( const float, const int, const float );
void early_exit_if( bool );
void start_execution_timing();
void stop_execution_timing();
// New routine test functions
void test_func();
//********************************************************************************************************************************************************//
//****************************************************************** Device (GPU) Code *******************************************************************//
//********************************************************************************************************************************************************//
// Preprocessing routines
__global__ void recon_volume_intersections_GPU( int, int*, bool*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*);
__global__ void bin_valid_histories_GPU( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float* );
__global__ void calculate_means_GPU( int*, float*, float*, float* );
__global__ void sum_squared_deviations_GPU( int, int*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float* );
__global__ void calculate_standard_deviations_GPU( int*, float*, float*, float* );
__global__ void statistical_cuts_GPU( int, int*, int*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, bool*, float*, float* );
__global__ void construct_sinogram_GPU( int*, float* );
__global__ void filter_GPU( float*, float* );
// Hull-Detection
__device__ void voxel_walk( bool*&, float, float, float, float, float, float );
__global__ void SC_GPU( int, bool*, int*, bool*, float*, float*, float*, float*, float*, float*, float* );
__global__ void MSC_GPU( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float* );
__global__ void SM_GPU( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float* );
__global__ void MSC_threshold_GPU( int* );
__global__ void SM_threshold_GPU( int*, int* );
__global__ void SM_threshold_GPU_2( int*, int* );
__global__ void carve_differences( int*, int* );
__global__ void averaging_filter_GPU( bool*, const int, const float );
// New routine test functions
__global__ void test_func_GPU( int*);
__device__ void test_func_device( int&, int&, int&);
/************************************************************************************************************************************************************/
/******************************************************************** Program Main **************************************************************************/
/************************************************************************************************************************************************************/
int main(int argc, char** argv)
{
char user_response[20];
test_func();
if( RUN_ON )
{
/********************************************************************************************************************************************************/
/* Start the execution timing clock */
/********************************************************************************************************************************************************/
start_execution_timing();
/********************************************************************************************************************************************************/
/* Initialize hull detection images and transfer them to the GPU (performed if SC_ON, MSC_ON, or SM_ON is true) */
/********************************************************************************************************************************************************/
hull_detection_initializations();
/********************************************************************************************************************************************************/
/* Read the u-coordinates of the detector planes from the config file, allocate and initialize statistical data arrays, and count the number of */
/* histories per file, projection, gantry angle, scan, and total. */
/********************************************************************************************************************************************************/
if( DATA_FORMAT == -1 )
assign_SSD_positions(); // Read the detector plane u-coordinates from config file
initializations(); // allocate and initialize host and GPU memory for binning
	count_histories();			// count the number of histories per file, per scan, total, etc. (dispatches to count_histories_old/v0/v1 based on DATA_FORMAT)
/********************************************************************************************************************************************************/
/* Iteratively Read and Process Data One Chunk at a Time. There are at Most MAX_GPU_HISTORIES Per Chunk (i.e. Iteration). On Each Iteration: */
/* (1) Read data from file */
/* (2) Determine which histories traverse the reconstruction volume and store this information in a boolean array */
/* (3) Determine which bin each history belongs to */
/* (4) Use the boolean array to determine which histories to keep and then push the intermediate data from these histories onto the permanent */
/* storage vectors */
/* (5) Free up temporary host/GPU array memory allocated during iteration */
/********************************************************************************************************************************************************/
puts("Iteratively Reading Data from Hard Disk");
puts("Removing Proton Histories that Don't Pass Through the Reconstruction Volume");
puts("Binning the Data from Those that Did...");
int start_file_num = 0, end_file_num = 0, histories_to_process = 0;
while( start_file_num != NUM_FILES )
{
while( end_file_num < NUM_FILES )
{
if( histories_to_process + histories_per_file[end_file_num] < MAX_GPU_HISTORIES )
histories_to_process += histories_per_file[end_file_num];
else
break;
end_file_num++;
}
read_data_chunk( histories_to_process, start_file_num, end_file_num );
recon_volume_intersections( histories_to_process );
bin_valid_histories( histories_to_process );
hull_detection( histories_to_process );
initial_processing_memory_clean();
start_file_num = end_file_num;
histories_to_process = 0;
}
puts("Data reading complete.");
early_exit_if( EXIT_AFTER_BINNING );
/********************************************************************************************************************************************************/
	/* Reduce vector capacities to their size, the number of histories remaining after histories that didn't intersect the reconstruction volume were ignored	*/
/********************************************************************************************************************************************************/
shrink_vectors( recon_vol_histories );
/********************************************************************************************************************************************************/
/* Perform thresholding on MSC and SM hulls and write all hull images to file */
/********************************************************************************************************************************************************/
hull_detection_finish();
early_exit_if( EXIT_AFTER_HULL_DETECTION );
/********************************************************************************************************************************************************/
/* Calculate the mean WEPL, relative ut-angle, and relative uv-angle for each bin and count the number of histories in each bin */
/********************************************************************************************************************************************************/
calculate_means();
/********************************************************************************************************************************************************/
/* Calculate the standard deviation in WEPL, relative ut-angle, and relative uv-angle for each bin. Iterate through the valid history vectors one */
/* chunk at a time, with at most MAX_GPU_HISTORIES per chunk, and calculate the difference between the mean WEPL and WEPL, mean relative ut-angle and */
	/* relative ut-angle, and mean relative uv-angle and relative uv-angle for each history.  The standard deviation is then found by summing the squares of	*/
	/* these differences for each bin, dividing by the number of histories in the bin, and taking the square root												*/
/********************************************************************************************************************************************************/
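	/* A worked form of the per-bin statistic described above (sketch only; N_b, x_i, and mean_b are not program variables, they denote the number of			*/
	/* histories in bin b, a history's WEPL or relative ut/uv angle, and that bin's mean):  stddev_b = sqrt( (1/N_b) * SUM_i ( x_i - mean_b )^2 )				*/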
puts("Calculating the cumulative sum of the squared deviation in WEPL and relative ut/uv angles over all histories for each bin...");
int remaining_histories = recon_vol_histories;
int start_position = 0;
while( remaining_histories > 0 )
{
if( remaining_histories > MAX_GPU_HISTORIES )
histories_to_process = MAX_GPU_HISTORIES;
else
histories_to_process = remaining_histories;
sum_squared_deviations( start_position, histories_to_process );
remaining_histories -= MAX_GPU_HISTORIES;
start_position += MAX_GPU_HISTORIES;
	}	// sum_squared_deviations
calculate_standard_deviations();
/********************************************************************************************************************************************************/
/* Allocate host memory for the sinogram, initialize it to zeros, allocate memory for it on the GPU, then transfer the initialized sinogram to the GPU */
/********************************************************************************************************************************************************/
initialize_sinogram();
/********************************************************************************************************************************************************/
/* Iterate through the valid history vectors one chunk at a time, with at most MAX_GPU_HISTORIES per chunk, and perform statistical cuts */
/********************************************************************************************************************************************************/
puts("Performing statistical cuts...");
remaining_histories = recon_vol_histories, start_position = 0;
while( remaining_histories > 0 )
{
if( remaining_histories > MAX_GPU_HISTORIES )
histories_to_process = MAX_GPU_HISTORIES;
else
histories_to_process = remaining_histories;
statistical_cuts( start_position, histories_to_process );
remaining_histories -= MAX_GPU_HISTORIES;
start_position += MAX_GPU_HISTORIES;
}
puts("Statistical cuts complete...");
printf("%d out of %d (%4f) histories passed cuts\n", post_cut_histories, total_histories, double( post_cut_histories / total_histories * 100 ) );
/********************************************************************************************************************************************************/
/* Free host memory for bin number array, free GPU memory for the statistics arrays, and shrink vectors to the number of histories that passed cuts */
/********************************************************************************************************************************************************/
post_cut_memory_clean();
resize_vectors( post_cut_histories );
shrink_vectors( post_cut_histories );
early_exit_if( EXIT_AFTER_STATISTICAL_CUTS );
/********************************************************************************************************************************************************/
/* Recalculate the mean WEPL for each bin using the histories remaining after cuts and use these to produce the sinogram */
/********************************************************************************************************************************************************/
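	/* Sketch of the bin value implied by the description above (b is a bin index, not a program variable): sinogram[b] = ( SUM of WEPL over histories in b ) / bin_counts[b]	*/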
construct_sinogram();
/********************************************************************************************************************************************************/
/* Perform filtered backprojection and write FBP hull to disk */
/********************************************************************************************************************************************************/
if( FBP_ON )
{
filter();
backprojection();
}
early_exit_if( EXIT_AFTER_FBP );
/********************************************************************************************************************************************************/
/* End program execution timing clock and print the total execution time to console window */
/********************************************************************************************************************************************************/
stop_execution_timing();
}
/************************************************************************************************************************************************************/
	/* Program has finished execution.  Require the user to hit the enter key to terminate the program and close the terminal/console window					*/
/************************************************************************************************************************************************************/
puts("Preprocessing complete. Press any key to close the console window...");
fgets(user_response, sizeof(user_response), stdin);
}
/************************************************************************************************************************************************************/
/******************************************************** Preprocessing Setup and Initializations ***********************************************************/
/************************************************************************************************************************************************************/
void assign_SSD_positions() //HERE THE COORDINATES OF THE DETECTORS PLANES ARE LOADED, THE CONFIG FILE IS CREATED BY FORD (RWS)
{
	char user_response[20] = "";	// initialized because it is printed below before any input is read
char configFilename[512];
puts("Reading tracker plane positions...");
sprintf(configFilename, "%s%s\\scan.cfg", input_directory, input_folder);
if( DEBUG_TEXT_ON )
printf("Opening config file %s...\n", configFilename);
ifstream configFile(configFilename);
if( !configFile.is_open() ) {
printf("ERROR: config file not found at %s!\n", configFilename);
fputs("Didn't Find File", stdout);
fflush(stdout);
printf("text = \"%s\"\n", user_response);
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
else
{
fputs("Found File", stdout);
fflush(stdout);
printf("user_response = \"%s\"\n", user_response);
}
if( DEBUG_TEXT_ON )
puts("Reading Tracking Plane Positions...");
for( int i = 0; i < 8; i++ ) {
configFile >> SSD_u_Positions[i];
if( DEBUG_TEXT_ON )
printf("SSD_u_Positions[%d] = %3f", i, SSD_u_Positions[i]);
}
configFile.close();
}
void initializations()
{
puts("Allocating statistical analysis arrays on host/GPU and counting proton histories...");
for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
histories_per_scan[scan_number] = 0;
histories_per_file = (int*) calloc( NUM_SCANS * GANTRY_ANGLES, sizeof(int) );
histories_per_gantry_angle = (int*) calloc( GANTRY_ANGLES, sizeof(int) );
recon_vol_histories_per_projection = (int*) calloc( GANTRY_ANGLES, sizeof(int) );
bin_counts_h = (int*) calloc( NUM_BINS, sizeof(int) );
mean_WEPL_h = (float*) calloc( NUM_BINS, sizeof(float) );
mean_rel_ut_angle_h = (float*) calloc( NUM_BINS, sizeof(float) );
mean_rel_uv_angle_h = (float*) calloc( NUM_BINS, sizeof(float) );
stddev_rel_ut_angle_h = (float*) calloc( NUM_BINS, sizeof(float) );
stddev_rel_uv_angle_h = (float*) calloc( NUM_BINS, sizeof(float) );
stddev_WEPL_h = (float*) calloc( NUM_BINS, sizeof(float) );
hipMalloc((void**) &bin_counts_d, MEM_SIZE_BINS_INTS );
hipMalloc((void**) &mean_WEPL_d, MEM_SIZE_BINS_FLOATS );
hipMalloc((void**) &mean_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS );
hipMalloc((void**) &mean_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS );
hipMalloc((void**) &stddev_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS );
hipMalloc((void**) &stddev_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS );
hipMalloc((void**) &stddev_WEPL_d, MEM_SIZE_BINS_FLOATS );
hipMemcpy( bin_counts_d, bin_counts_h, MEM_SIZE_BINS_INTS, hipMemcpyHostToDevice );
hipMemcpy( mean_WEPL_d, mean_WEPL_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice );
hipMemcpy( mean_rel_ut_angle_d, mean_rel_ut_angle_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice );
hipMemcpy( mean_rel_uv_angle_d, mean_rel_uv_angle_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice );
hipMemcpy( stddev_rel_ut_angle_d, stddev_rel_ut_angle_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice );
hipMemcpy( stddev_rel_uv_angle_d, stddev_rel_uv_angle_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice );
hipMemcpy( stddev_WEPL_d, stddev_WEPL_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice );
}
void count_histories()
{
switch( DATA_FORMAT )
{
case -1 : count_histories_old(); break;
case 0 : count_histories_v0(); break;
case 1 : count_histories_v1(); break;
}
}
void count_histories_old()
{
if( DEBUG_TEXT_ON )
printf("Counting histories...\n");
char user_response[20];
char data_filename[128];
int file_size, num_histories, file_number = 0, gantry_position_number = 0;
for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ )
{
for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ )
{
sprintf( data_filename, "%s%s/%s_trans%d_%03d%s", input_directory, input_folder, input_base_name, scan_number, gantry_angle, file_extension );
//printf("Name = %s", data_filename );
FILE *data_file = fopen(data_filename, "rb");
if( data_file == NULL )
{
fputs( "Error Opening Data File: Check that the directories are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
fseek( data_file, 0, SEEK_END );
file_size = ftell( data_file );
if( BINARY_ENCODING )
{
if( file_size % BYTES_PER_HISTORY )
{
printf("ERROR! Problem with bytes_per_history!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(2);
}
num_histories = file_size / BYTES_PER_HISTORY;
}
else
num_histories = file_size;
fclose(data_file);
histories_per_file[file_number] = num_histories;
histories_per_gantry_angle[gantry_position_number] += num_histories;
histories_per_scan[scan_number-1] += num_histories;
total_histories += num_histories;
if( DEBUG_TEXT_ON )
printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number);
}
}
if( DEBUG_TEXT_ON )
{
		for( int file_number = 0, gantry_position_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++, gantry_position_number++ )
{
if( file_number % NUM_SCANS == 0 )
printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[gantry_position_number], int(gantry_position_number* GANTRY_ANGLE_INTERVAL) );
printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 );
}
for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1);
printf("There are a Total of %d Histories\n", total_histories);
}
}
void count_histories_v0()
{
if( DEBUG_TEXT_ON )
puts("Counting histories...\n");
char user_response[20];
char data_filename[256];
int num_histories, file_number = 0, gantry_position_number = 0;
for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ )
{
for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ )
{
sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension );
//cout << data_filename << endl;
ifstream data_file(data_filename, ios::binary);
			if( !data_file.is_open() )
{
fputs( "File not found: Check that the directories and files are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
char magic_number[5];
data_file.read(magic_number, 4);
magic_number[4] = '\0';
if( strcmp(magic_number, "PCTD") ) {
puts("Error: unknown file type (should be PCTD)!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
int version_id;
data_file.read((char*)&version_id, sizeof(int));
if( version_id == 0 )
{
data_file.read((char*)&num_histories, sizeof(int));
data_file.close();
histories_per_file[file_number] = num_histories;
histories_per_gantry_angle[gantry_position_number] += num_histories;
histories_per_scan[scan_number-1] += num_histories;
total_histories += num_histories;
if( DEBUG_TEXT_ON )
printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number);
}
else
{
printf("ERROR: Unsupported format version (%d)!\n", version_id);
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
}
}
if( DEBUG_TEXT_ON )
{
		for( int file_number = 0, gantry_position_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++, gantry_position_number++ )
{
if( file_number % NUM_SCANS == 0 )
printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[gantry_position_number], int(gantry_position_number* GANTRY_ANGLE_INTERVAL) );
printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 );
}
for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1);
printf("There are a Total of %d Histories\n", total_histories);
}
// The GPU cannot process all the histories at once, so they are broken up into chunks that can fit on the GPU. As we iterate
// through the data one chunk at a time, we determine which histories enter the reconstruction volume and if they belong to a
// valid bin (i.e. t, v, and angular bin number is greater than zero and less than max). If both are true, we append the bin
// number, WEPL, and relative entry/exit ut/uv angles to the following four arrays. We do not know ahead of time how many
	// valid histories there will be, so memory is allocated to accommodate every history and the actual number of valid histories
	// are counted.  Although we waste some host memory, we can avoid writing intermediate information to file or keeping the raw
	// data and recalculating it every time it's needed.  Once all the data is processed and we know how many valid histories we
// have, we simply ignore the illegitimate elements of the four arrays to avoid transferring invalid and unnecessary data to
// and from the GPU.
}
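// A minimal sketch of the chunked-processing pattern described in the comment above: histories are handled in
// batches of at most MAX_GPU_HISTORIES so each batch fits in GPU memory.  This helper is illustrative only and
// is not called anywhere in this file; 'process_chunk' is a hypothetical stand-in for the per-chunk work
// (read_data_chunk/recon_volume_intersections/bin_valid_histories as invoked from main()).
static void chunked_processing_sketch( int total_histories_to_process, void (*process_chunk)( int start, int count ) )
{
	int remaining = total_histories_to_process;
	int start = 0;
	while( remaining > 0 )
	{
		int count = ( remaining > MAX_GPU_HISTORIES ) ? MAX_GPU_HISTORIES : remaining;
		process_chunk( start, count );		// operate on histories [start, start + count)
		start += count;
		remaining -= count;
	}
}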
void count_histories_v1()
{
if( DEBUG_TEXT_ON )
printf("Counting histories...\n");
char user_response[20];
char data_filename[128];
int file_size, num_histories, file_number = 0, gantry_position_number = 0;
for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ )
{
for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ )
{
sprintf(data_filename, "%s%s/%s_%03d%%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension );
FILE *data_file = fopen(data_filename, "rb");
if( data_file == NULL )
{
fputs( "Error Opening Data File: Check that the directories are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
fseek( data_file, 0, SEEK_END );
file_size = ftell( data_file );
if( BINARY_ENCODING )
{
if( file_size % BYTES_PER_HISTORY )
{
printf("ERROR! Problem with bytes_per_history!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(2);
}
num_histories = file_size / BYTES_PER_HISTORY;
}
else
num_histories = file_size;
fclose(data_file);
histories_per_file[file_number] = num_histories;
histories_per_gantry_angle[gantry_position_number] += num_histories;
histories_per_scan[scan_number-1] += num_histories;
total_histories += num_histories;
if( DEBUG_TEXT_ON )
printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number);
}
}
if( DEBUG_TEXT_ON )
{
		for( int file_number = 0, gantry_position_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++, gantry_position_number++ )
{
if( file_number % NUM_SCANS == 0 )
printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[gantry_position_number], int(gantry_position_number* GANTRY_ANGLE_INTERVAL) );
printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 );
}
for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1);
printf("There are a Total of %d Histories\n", total_histories);
}
// The GPU cannot process all the histories at once, so they are broken up into chunks that can fit on the GPU. As we iterate
// through the data one chunk at a time, we determine which histories enter the reconstruction volume and if they belong to a
// valid bin (i.e. t, v, and angular bin number is greater than zero and less than max). If both are true, we append the bin
// number, WEPL, and relative entry/exit ut/uv angles to the following four arrays. We do not know ahead of time how many
	// valid histories there will be, so memory is allocated to accommodate every history and the actual number of valid histories
	// are counted.  Although we waste some host memory, we can avoid writing intermediate information to file or keeping the raw
	// data and recalculating it every time it's needed.  Once all the data is processed and we know how many valid histories we
// have, we simply ignore the illegitimate elements of the four arrays to avoid transferring invalid and unnecessary data to
// and from the GPU.
}
void reserve_vector_capacity()
{
bin_num_vector.reserve( total_histories );
//gantry_angle_vector.reserve( total_histories );
WEPL_vector.reserve( total_histories );
x_entry_vector.reserve( total_histories );
y_entry_vector.reserve( total_histories );
z_entry_vector.reserve( total_histories );
x_exit_vector.reserve( total_histories );
y_exit_vector.reserve( total_histories );
z_exit_vector.reserve( total_histories );
xy_entry_angle_vector.reserve( total_histories );
xz_entry_angle_vector.reserve( total_histories );
//xy_exit_angle_vector.reserve( total_histories );
//xz_exit_angle_vector.reserve( total_histories );
relative_ut_angle_vector.reserve( total_histories );
relative_uv_angle_vector.reserve( total_histories );
}
/************************************************************************************************************************************************************/
/********************************************************* Data Importation, Initial Cuts, and Binning ******************************************************/
/************************************************************************************************************************************************************/
void read_data_chunk( const int histories_to_process, const int start_file_num, const int end_file_num )
{
switch( DATA_FORMAT )
{
case -1 : read_data_chunk_old( histories_to_process, start_file_num, end_file_num - 1 ); break;
case 0 : read_data_chunk_v0( histories_to_process, start_file_num, end_file_num - 1 ); break;
case 1 : read_data_chunk_v1( histories_to_process, start_file_num, end_file_num - 1 ); break;
}
}
void read_data_chunk_old( const int num_histories, const int start_file_num, const int end_file_num )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
t_in_1_h = (float*) malloc(mem_size_hist_floats);
t_in_2_h = (float*) malloc(mem_size_hist_floats);
t_out_1_h = (float*) malloc(mem_size_hist_floats);
t_out_2_h = (float*) malloc(mem_size_hist_floats);
u_in_1_h = (float*) malloc(mem_size_hist_floats);
u_in_2_h = (float*) malloc(mem_size_hist_floats);
u_out_1_h = (float*) malloc(mem_size_hist_floats);
u_out_2_h = (float*) malloc(mem_size_hist_floats);
v_in_1_h = (float*) malloc(mem_size_hist_floats);
v_in_2_h = (float*) malloc(mem_size_hist_floats);
v_out_1_h = (float*) malloc(mem_size_hist_floats);
v_out_2_h = (float*) malloc(mem_size_hist_floats);
WEPL_h = (float*) malloc(mem_size_hist_floats);
gantry_angle_h = (int*) malloc(mem_size_hist_ints);
int array_index = 0, gantry_position, gantry_angle, scan_number, scan_histories;
float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data;
char tracker_plane[4];
char data_filename[128];
FILE* data_file;
for( int file_num = start_file_num; file_num <= end_file_num; file_num++ )
{
gantry_position = file_num / NUM_SCANS;
gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL;
scan_number = file_num % NUM_SCANS + 1;
scan_histories = histories_per_file[file_num];
printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number );
sprintf( data_filename, "%s%s/%s_trans%d_%03d%s", input_directory, input_folder, input_base_name, scan_number, gantry_angle, file_extension );
data_file = fopen( data_filename, "rb" );
for( int history = 0; history < scan_histories; history++, array_index++ )
{
fread(&v_data, sizeof(float), 4, data_file);
fread(&t_data, sizeof(float), 4, data_file);
fread(&tracker_plane, sizeof(char), 4, data_file);
fread(&WEPL_data, sizeof(float), 1, data_file);
fread(&gantry_angle_data, sizeof(float), 1, data_file);
fread(&dummy_data, sizeof(float), 1, data_file); // dummy read because each event has an extra 4 bytes, for some reason
if( DATA_IN_MM )
{
// Convert the input data from mm to cm
v_in_1_h[array_index] = v_data[0] * 0.1;
v_in_2_h[array_index] = v_data[1] * 0.1;
v_out_1_h[array_index] = v_data[2] * 0.1;
v_out_2_h[array_index] = v_data[3] * 0.1;
t_in_1_h[array_index] = t_data[0] * 0.1;
t_in_2_h[array_index] = t_data[1] * 0.1;
t_out_1_h[array_index] = t_data[2] * 0.1;
t_out_2_h[array_index] = t_data[3] * 0.1;
WEPL_h[array_index] = WEPL_data * 0.1;
}
else
{
v_in_1_h[array_index] = v_data[0];
v_in_2_h[array_index] = v_data[1];
v_out_1_h[array_index] = v_data[2];
v_out_2_h[array_index] = v_data[3];
t_in_1_h[array_index] = t_data[0];
t_in_2_h[array_index] = t_data[1];
t_out_1_h[array_index] = t_data[2];
t_out_2_h[array_index] = t_data[3];
WEPL_h[array_index] = WEPL_data;
}
if( !MICAH_SIM )
{
u_in_1_h[array_index] = SSD_u_Positions[int(tracker_plane[0])];
u_in_2_h[array_index] = SSD_u_Positions[int(tracker_plane[1])];
u_out_1_h[array_index] = SSD_u_Positions[int(tracker_plane[2])];
u_out_2_h[array_index] = SSD_u_Positions[int(tracker_plane[3])];
}
else
{
u_in_1_h[array_index] = SSD_u_Positions[0];
u_in_2_h[array_index] = SSD_u_Positions[2];
u_out_1_h[array_index] = SSD_u_Positions[4];
u_out_2_h[array_index] = SSD_u_Positions[6];
}
if( SSD_IN_MM )
{
// Convert the tracking plane positions from mm to cm
u_in_1_h[array_index] *= 0.1;
u_in_2_h[array_index] *= 0.1;
u_out_1_h[array_index] *= 0.1;
u_out_2_h[array_index] *= 0.1;
}
gantry_angle_h[array_index] = int(gantry_angle_data);
}
fclose(data_file);
}
}
void read_data_chunk_v0( const int num_histories, const int start_file_num, const int end_file_num )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
t_in_1_h = (float*) malloc(mem_size_hist_floats);
t_in_2_h = (float*) malloc(mem_size_hist_floats);
t_out_1_h = (float*) malloc(mem_size_hist_floats);
t_out_2_h = (float*) malloc(mem_size_hist_floats);
u_in_1_h = (float*) malloc(mem_size_hist_floats);
u_in_2_h = (float*) malloc(mem_size_hist_floats);
u_out_1_h = (float*) malloc(mem_size_hist_floats);
u_out_2_h = (float*) malloc(mem_size_hist_floats);
v_in_1_h = (float*) malloc(mem_size_hist_floats);
v_in_2_h = (float*) malloc(mem_size_hist_floats);
v_out_1_h = (float*) malloc(mem_size_hist_floats);
v_out_2_h = (float*) malloc(mem_size_hist_floats);
WEPL_h = (float*) malloc(mem_size_hist_floats);
gantry_angle_h = (int*) malloc(mem_size_hist_ints);
if( WRITE_SSD_ANGLES )
{
ut_entry_angle = (float*) malloc(mem_size_hist_floats);
uv_entry_angle = (float*) malloc(mem_size_hist_floats);
ut_exit_angle = (float*) malloc(mem_size_hist_floats);
uv_exit_angle = (float*) malloc(mem_size_hist_floats);
}
/*
Contains the following headers:
Magic number identifier: "PCTD" (4-byte string)
Format version identifier (integer)
Number of events in file (integer)
Projection angle (float | degrees)
Beam energy (float | MeV)
Acquisition/generation date (integer | Unix time)
Pre-process date (integer | Unix time)
Phantom name or description (variable length string)
Data source (variable length string)
Prepared by (variable length string)
* Note on variable length strings: each variable length string should be preceded with an integer containing the number of characters in the string.
Event data:
	Data is stored with all of one type in a consecutive row, meaning the first entries will be N t0 values, where N is the number of events in the file. Next will be N t1 values, etc. This more closely matches the data structure in memory.
Detector coordinates in mm relative to a phantom center, given in the detector coordinate system:
t0 (float * N)
t1 (float * N)
t2 (float * N)
t3 (float * N)
v0 (float * N)
v1 (float * N)
v2 (float * N)
v3 (float * N)
u0 (float * N)
u1 (float * N)
u2 (float * N)
u3 (float * N)
WEPL in mm (float * N)
*/
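	// Sketch of the variable-length string convention noted above (illustrative names only; the header-reading
	// code below follows the same pattern for the phantom name, data source, and "prepared by" fields):
	//     int len;
	//     data_file.read( (char*) &len, sizeof(int) );	// character count is stored first
	//     char* str = (char*) malloc( len + 1 );
	//     data_file.read( str, len );
	//     str[len] = '\0';								// terminator assumed not to be stored in the file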
char user_response[20];
char data_filename[128];
//int array_index = 0;
for( int file_num = start_file_num; file_num <= end_file_num; file_num++ )
{
int gantry_position = file_num / NUM_SCANS;
int gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL;
int scan_number = file_num % NUM_SCANS + 1;
//int scan_histories = histories_per_file[file_num];
printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number );
sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension );
ifstream data_file(data_filename, ios::binary);
		if( !data_file.is_open() )
{
fputs( "File not found: Check that the directories and files are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
char magic_number[5];
data_file.read(magic_number, 4);
magic_number[4] = '\0';
if( strcmp(magic_number, "PCTD") ) {
puts("Error: unknown file type (should be PCTD)!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
int version_id;
data_file.read((char*)&version_id, sizeof(int));
if( version_id == 0 )
{
int file_histories;
data_file.read((char*)&file_histories, sizeof(int));
puts("Reading headers from file...\n");
float projection_angle, beam_energy;
int generation_date, preprocess_date;
int phantom_name_size, data_source_size, prepared_by_size;
char *phantom_name, *data_source, *prepared_by;
data_file.read((char*)&projection_angle, sizeof(float));
data_file.read((char*)&beam_energy, sizeof(float));
data_file.read((char*)&generation_date, sizeof(int));
data_file.read((char*)&preprocess_date, sizeof(int));
data_file.read((char*)&phantom_name_size, sizeof(int));
phantom_name = (char*)malloc(phantom_name_size);
data_file.read(phantom_name, phantom_name_size);
data_file.read((char*)&data_source_size, sizeof(int));
data_source = (char*)malloc(data_source_size);
data_file.read(data_source, data_source_size);
data_file.read((char*)&prepared_by_size, sizeof(int));
prepared_by = (char*)malloc(prepared_by_size);
data_file.read(prepared_by, prepared_by_size);
printf("Loading %d histories from file\n", num_histories);
int data_size = num_histories * sizeof(float);
data_file.read((char*)t_in_1_h, data_size);
data_file.read((char*)t_in_2_h, data_size);
data_file.read((char*)t_out_1_h, data_size);
data_file.read((char*)t_out_2_h, data_size);
data_file.read((char*)v_in_1_h, data_size);
data_file.read((char*)v_in_2_h, data_size);
data_file.read((char*)v_out_1_h, data_size);
data_file.read((char*)v_out_2_h, data_size);
data_file.read((char*)u_in_1_h, data_size);
data_file.read((char*)u_in_2_h, data_size);
data_file.read((char*)u_out_1_h, data_size);
data_file.read((char*)u_out_2_h, data_size);
data_file.read((char*)WEPL_h, data_size);
//float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data;
for( int i = 0; i < num_histories; i++ )
{
if( DATA_IN_MM )
{
// Convert the input data from mm to cm
v_in_1_h[i] *= 0.1;
v_in_2_h[i] *= 0.1;
v_out_1_h[i] *= 0.1;
v_out_2_h[i] *= 0.1;
t_in_1_h[i] *= 0.1;
t_in_2_h[i] *= 0.1;
t_out_1_h[i] *= 0.1;
t_out_2_h[i] *= 0.1;
WEPL_h[i] *= 0.1;
if( WEPL_h[i] < 0 )
printf("WEPL[%d] = %3f\n", i, WEPL_h[i] );
u_in_1_h[i] *= 0.1;
u_in_2_h[i] *= 0.1;
u_out_1_h[i] *= 0.1;
u_out_2_h[i] *= 0.1;
if( WRITE_SSD_ANGLES )
{
ut_entry_angle[i] = atan2f( t_in_2_h[i] - t_in_1_h[i], u_in_2_h[i] - u_in_1_h[i] );
uv_entry_angle[i] = atan2f( v_in_2_h[i] - v_in_1_h[i], u_in_2_h[i] - u_in_1_h[i] );
ut_exit_angle[i] = atan2f( t_out_2_h[i] - t_out_1_h[i], u_out_2_h[i] - u_out_1_h[i] );
uv_exit_angle[i] = atan2f( v_out_2_h[i] - v_out_1_h[i], u_out_2_h[i] - u_out_1_h[i] );
}
}
gantry_angle_h[i] = int(projection_angle);
}
data_file.close();
if( WRITE_SSD_ANGLES )
{
sprintf(data_filename, "%s_%03d%s", "ut_entry_angle", gantry_angle, ".txt" );
write_array_to_disk( data_filename, output_directory, output_folder, ut_entry_angle, COLUMNS, ROWS, SLICES, file_histories, true );
sprintf(data_filename, "%s_%03d%s", "uv_entry_angle", gantry_angle, ".txt" );
write_array_to_disk( "ut_entry_angle", output_directory, output_folder, uv_entry_angle, COLUMNS, ROWS, SLICES, file_histories, true );
sprintf(data_filename, "%s_%03d%s", "ut_exit_angle", gantry_angle, ".txt" );
write_array_to_disk( "ut_entry_angle", output_directory, output_folder, ut_exit_angle, COLUMNS, ROWS, SLICES, file_histories, true );
sprintf(data_filename, "%s_%03d%s", "uv_exit_angle", gantry_angle, ".txt" );
write_array_to_disk( "ut_entry_angle", output_directory, output_folder, uv_exit_angle, COLUMNS, ROWS, SLICES, file_histories, true );
}
}
}
}
void read_data_chunk_v1( const int num_histories, const int start_file_num, const int end_file_num ){
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
t_in_1_h = (float*) malloc(mem_size_hist_floats);
t_in_2_h = (float*) malloc(mem_size_hist_floats);
t_out_1_h = (float*) malloc(mem_size_hist_floats);
t_out_2_h = (float*) malloc(mem_size_hist_floats);
u_in_1_h = (float*) malloc(mem_size_hist_floats);
u_in_2_h = (float*) malloc(mem_size_hist_floats);
u_out_1_h = (float*) malloc(mem_size_hist_floats);
u_out_2_h = (float*) malloc(mem_size_hist_floats);
v_in_1_h = (float*) malloc(mem_size_hist_floats);
v_in_2_h = (float*) malloc(mem_size_hist_floats);
v_out_1_h = (float*) malloc(mem_size_hist_floats);
v_out_2_h = (float*) malloc(mem_size_hist_floats);
WEPL_h = (float*) malloc(mem_size_hist_floats);
gantry_angle_h = (int*) malloc(mem_size_hist_ints);
/*
Contains the following headers:
Magic number identifier: "PCTD" (4-byte string)
Format version identifier (integer)
Number of events in file (integer)
Projection angle (float | degrees)
Beam energy (float | MeV)
Acquisition/generation date (integer | Unix time)
Pre-process date (integer | Unix time)
Phantom name or description (variable length string)
Data source (variable length string)
Prepared by (variable length string)
* Note on variable length strings: each variable length string should be preceded with an integer containing the number of characters in the string.
Event data:
	Data is stored with all of one type in a consecutive row, meaning the first entries will be N t0 values, where N is the number of events in the file. Next will be N t1 values, etc. This more closely matches the data structure in memory.
Detector coordinates in mm relative to a phantom center, given in the detector coordinate system:
t0 (float * N)
t1 (float * N)
t2 (float * N)
t3 (float * N)
v0 (float * N)
v1 (float * N)
v2 (float * N)
v3 (float * N)
u0 (float * N)
u1 (float * N)
u2 (float * N)
u3 (float * N)
WEPL in mm (float * N)
*/
char user_response[20];
char data_filename[128];
//int array_index = 0;
for( int file_num = start_file_num; file_num <= end_file_num; file_num++ )
{
int gantry_position = file_num / NUM_SCANS;
int gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL;
int scan_number = file_num % NUM_SCANS + 1;
//int scan_histories = histories_per_file[file_num];
printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number );
sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension );
ifstream data_file(data_filename, ios::binary);
		if( !data_file.is_open() )
{
fputs( "File not found: Check that the directories and files are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
char magic_number[5];
data_file.read(magic_number, 4);
magic_number[4] = '\0';
if( strcmp(magic_number, "PCTD") ) {
puts("Error: unknown file type (should be PCTD)!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
int version_id;
data_file.read((char*)&version_id, sizeof(int));
if( version_id == 0 )
{
int num_histories;
data_file.read((char*)&num_histories, sizeof(int));
puts("Reading headers from file...\n");
float projection_angle, beam_energy;
int generation_date, preprocess_date;
int phantom_name_size, data_source_size, prepared_by_size;
char *phantom_name, *data_source, *prepared_by;
data_file.read((char*)&projection_angle, sizeof(float));
data_file.read((char*)&beam_energy, sizeof(float));
data_file.read((char*)&generation_date, sizeof(int));
data_file.read((char*)&preprocess_date, sizeof(int));
data_file.read((char*)&phantom_name_size, sizeof(int));
phantom_name = (char*)malloc(phantom_name_size);
data_file.read(phantom_name, phantom_name_size);
data_file.read((char*)&data_source_size, sizeof(int));
data_source = (char*)malloc(data_source_size);
data_file.read(data_source, data_source_size);
data_file.read((char*)&prepared_by_size, sizeof(int));
prepared_by = (char*)malloc(prepared_by_size);
data_file.read(prepared_by, prepared_by_size);
printf("Loading %d histories from file\n", num_histories);
int data_size = num_histories * sizeof(float);
data_file.read((char*)t_in_1_h, data_size);
data_file.read((char*)t_in_2_h, data_size);
data_file.read((char*)t_out_1_h, data_size);
data_file.read((char*)t_out_2_h, data_size);
data_file.read((char*)v_in_1_h, data_size);
data_file.read((char*)v_in_2_h, data_size);
data_file.read((char*)v_out_1_h, data_size);
data_file.read((char*)v_out_2_h, data_size);
data_file.read((char*)u_in_1_h, data_size);
data_file.read((char*)u_in_2_h, data_size);
data_file.read((char*)u_out_1_h, data_size);
data_file.read((char*)u_out_2_h, data_size);
data_file.read((char*)WEPL_h, data_size);
//float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data;
for( int i = 0; i < num_histories; i++ )
{
if( DATA_IN_MM )
{
// Convert the input data from mm to cm
v_in_1_h[i] *= 0.1;
v_in_2_h[i] *= 0.1;
v_out_1_h[i] *= 0.1;
v_out_2_h[i] *= 0.1;
t_in_1_h[i] *= 0.1;
t_in_2_h[i] *= 0.1;
t_out_1_h[i] *= 0.1;
t_out_2_h[i] *= 0.1;
WEPL_h[i] *= 0.1;
if( WEPL_h[i] < 0 )
printf("WEPL[%d] = %3f\n", i, WEPL_h[i] );
u_in_1_h[i] *= 0.1;
u_in_2_h[i] *= 0.1;
u_out_1_h[i] *= 0.1;
u_out_2_h[i] *= 0.1;
}
gantry_angle_h[i] = int(projection_angle);
}
data_file.close();
}
}
}
void recon_volume_intersections( const int num_histories )
{
//printf("There are %d histories in this projection\n", num_histories );
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
unsigned int mem_size_hist_bool = sizeof(bool) * num_histories;
// Allocate GPU memory
hipMalloc((void**) &t_in_1_d, mem_size_hist_floats);
hipMalloc((void**) &t_in_2_d, mem_size_hist_floats);
hipMalloc((void**) &t_out_1_d, mem_size_hist_floats);
hipMalloc((void**) &t_out_2_d, mem_size_hist_floats);
hipMalloc((void**) &u_in_1_d, mem_size_hist_floats);
hipMalloc((void**) &u_in_2_d, mem_size_hist_floats);
hipMalloc((void**) &u_out_1_d, mem_size_hist_floats);
hipMalloc((void**) &u_out_2_d, mem_size_hist_floats);
hipMalloc((void**) &v_in_1_d, mem_size_hist_floats);
hipMalloc((void**) &v_in_2_d, mem_size_hist_floats);
hipMalloc((void**) &v_out_1_d, mem_size_hist_floats);
hipMalloc((void**) &v_out_2_d, mem_size_hist_floats);
hipMalloc((void**) &WEPL_d, mem_size_hist_floats);
hipMalloc((void**) &gantry_angle_d, mem_size_hist_ints);
hipMalloc((void**) &x_entry_d, mem_size_hist_floats);
hipMalloc((void**) &y_entry_d, mem_size_hist_floats);
hipMalloc((void**) &z_entry_d, mem_size_hist_floats);
hipMalloc((void**) &x_exit_d, mem_size_hist_floats);
hipMalloc((void**) &y_exit_d, mem_size_hist_floats);
hipMalloc((void**) &z_exit_d, mem_size_hist_floats);
hipMalloc((void**) &xy_entry_angle_d, mem_size_hist_floats);
hipMalloc((void**) &xz_entry_angle_d, mem_size_hist_floats);
hipMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats);
hipMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats);
hipMalloc((void**) &relative_ut_angle_d, mem_size_hist_floats);
hipMalloc((void**) &relative_uv_angle_d, mem_size_hist_floats);
hipMalloc((void**) &traversed_recon_volume_d, mem_size_hist_bool);
hipMemcpy(t_in_1_d, t_in_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(t_in_2_d, t_in_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(t_out_1_d, t_out_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(t_out_2_d, t_out_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(u_in_1_d, u_in_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(u_in_2_d, u_in_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(u_out_1_d, u_out_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(u_out_2_d, u_out_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(v_in_1_d, v_in_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(v_in_2_d, v_in_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(v_out_1_d, v_out_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(v_out_2_d, v_out_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
hipMemcpy(gantry_angle_d, gantry_angle_h, mem_size_hist_ints, hipMemcpyHostToDevice) ;
hipMemcpy(WEPL_d, WEPL_h, mem_size_hist_floats, hipMemcpyHostToDevice) ;
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
hipLaunchKernelGGL(( recon_volume_intersections_GPU), dim3(dimGrid), dim3(dimBlock), 0, 0,
num_histories, gantry_angle_d, traversed_recon_volume_d, WEPL_d,
t_in_1_d, t_in_2_d, t_out_1_d, t_out_2_d,
u_in_1_d, u_in_2_d, u_out_1_d, u_out_2_d,
v_in_1_d, v_in_2_d, v_out_1_d, v_out_2_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d,
xy_entry_angle_d, xz_entry_angle_d, xy_exit_angle_d, xz_exit_angle_d,
relative_ut_angle_d, relative_uv_angle_d
);
free(t_in_1_h);
free(t_in_2_h);
free(v_in_1_h);
free(v_in_2_h);
free(u_in_1_h);
free(u_in_2_h);
free(t_out_1_h);
free(t_out_2_h);
free(v_out_1_h);
free(v_out_2_h);
free(u_out_1_h);
free(u_out_2_h);
hipFree(t_in_1_d);
hipFree(t_in_2_d);
hipFree(v_in_1_d);
hipFree(v_in_2_d);
hipFree(u_in_1_d);
hipFree(u_in_2_d);
hipFree(t_out_1_d);
hipFree(t_out_2_d);
hipFree(v_out_1_d);
hipFree(v_out_2_d);
hipFree(u_out_1_d);
hipFree(u_out_2_d);
hipFree(gantry_angle_d);
}
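// NOTE (illustrative only): the hipMalloc/hipMemcpy calls above discard their hipError_t return values. The helper below is a minimal,
// hedged sketch of how those codes could be checked; it is not called anywhere in the original program and the name check_hip_error
// is hypothetical. It assumes only the HIP runtime API and the standard headers already included at the top of this file.
inline void check_hip_error( hipError_t status, const char* label )
{
	if( status != hipSuccess )
	{
		// hipGetErrorString converts the numeric error code into a human-readable message
		printf( "HIP error during %s: %s\n", label, hipGetErrorString( status ) );
		exit( EXIT_FAILURE );
	}
}
// Hypothetical usage:
//	check_hip_error( hipMalloc( (void**) &t_in_1_d, mem_size_hist_floats ), "hipMalloc t_in_1_d" );
//	check_hip_error( hipMemcpy( t_in_1_d, t_in_1_h, mem_size_hist_floats, hipMemcpyHostToDevice ), "hipMemcpy t_in_1_d" );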
__global__ void recon_volume_intersections_GPU
(
int num_histories, int* gantry_angle, bool* traversed_recon_volume, float* WEPL,
float* t_in_1, float* t_in_2, float* t_out_1, float* t_out_2,
float* u_in_1, float* u_in_2, float* u_out_1, float* u_out_2,
float* v_in_1, float* v_in_2, float* v_out_1, float* v_out_2,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit,
float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle,
float* relative_ut_angle, float* relative_uv_angle
)
{
/*
Determine if the proton path passes through the reconstruction volume (i.e. intersects the reconstruction
cylinder twice) and if it does, determine the x, y, and z positions in the global/object coordinate system where
the proton enters and exits the reconstruction volume. The origin of the object coordinate system is defined to
be at the center of the reconstruction cylinder so that its volume is bounded by:
-RECON_CYL_RADIUS <= x <= RECON_CYL_RADIUS
-RECON_CYL_RADIUS <= y <= RECON_CYL_RADIUS
-RECON_CYL_HEIGHT/2 <= z <= RECON_CYL_HEIGHT/2
First, the coordinates of the points where the proton path intersected the entry/exit detectors must be
calculated.  Since the detectors record data in the detector coordinate system, data in the utv coordinate
system must be converted into the global/object coordinate system. The coordinate transformation can be
accomplished using a rotation matrix with an angle of rotation determined by the angle between the two
coordinate systems, which is the gantry_angle, in this case:
Rotate ut-coordinate system to xy-coordinate system
x = cos( gantry_angle ) * u - sin( gantry_angle ) * t
y = sin( gantry_angle ) * u + cos( gantry_angle ) * t
Rotate xy-coordinate system to ut-coordinate system
u = cos( gantry_angle ) * x + sin( gantry_angle ) * y
t = cos( gantry_angle ) * y - sin( gantry_angle ) * x
If a proton passes through the reconstruction volume, then the line defining its path in the
xy-plane will intersect the circle defining the boundary of the reconstruction cylinder in the xy-plane twice.
We can determine if the proton path passes through the reconstruction volume by equating the equations of the
proton path and the circle. This produces a second order polynomial which we must solve:
f(x)_proton = f(x)_cylinder
mx+b = sqrt(r^2 - x^2)
m^2x^2 + 2mbx + b^2 = r^2 - x^2
(m^2 + 1)x^2 + 2mbx + (b^2 - r^2) = 0
ax^2 + bx + c = 0
=> a = m^2 + 1
b = 2mb
c = b^2 - r^2
We can solve this using the quadratic formula ( [-b +/- sqrt(b^2-4ac)] / 2a ).  If the proton passed through the
reconstruction volume, then the discriminant will be greater than zero ( b^2-4ac > 0 ) and the quadratic formula
will return two unique points of intersection.  Of these two points, the one closest to where the proton's
entry/exit path intersects the entry/exit detector plane is taken as the entry/exit coordinate of the
reconstruction volume.  If the discriminant <= 0, then the proton path does not pass through the reconstruction
volume and the intersection coordinates need not be determined.
If the entry/exit path travels through the cone bounded by y = |x| and y = -|x|, the x-coordinates will be small
and the difference between the entry and exit x-coordinates will approach zero, causing instabilities in the trig
functions and slope calculations (the x difference appears in the denominator).  To avoid these inaccuracies,
coordinates for such proton paths are rotated by PI/2 radians (90 degrees) prior to the calculations and rotated
back afterwards, again using a rotation matrix transformation:
Positive Rotation By 90 Degrees
x' = cos( 90 ) * x - sin( 90 ) * y = -y
y' = sin( 90 ) * x + cos( 90 ) * y = x
Negative Rotation By 90 Degrees
x' = cos( 90 ) * x + sin( 90 ) * y = y
y' = cos( 90 ) * y - sin( 90 ) * x = -x
*/
float a = 0, b = 0, c = 0;
float x_intercept_1, x_intercept_2, y_intercept_1, y_intercept_2, squared_distance_1, squared_distance_2;
float x_temp, y_temp;
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( i < num_histories )
{
// Index the history arrays only after the bounds check so excess threads never read or write past the end of the arrays
float rotation_angle_radians = gantry_angle[i] * ANGLE_TO_RADIANS;
traversed_recon_volume[i] = false;
/***************************************************************************************************************/
/**************************************** Check entry information **********************************************/
/***************************************************************************************************************/
// Determine if the proton path enters the reconstruction volume. The proton path is defined using the entry angle and
// position where the proton intersected the entry SSD which is closest to the object. If this line projected onto the
// xy plane intersects the reconstruction cylinder, the line will intersect the circle in the xy plane which describes the
// boundary of the reconstruction cylinder twice and its entry elevation will be within the height of the cylinder.
// Relevant angles in radians: gantry angle, proton path entry angle in ut and xy planes.
float ut_entry_angle = atan2f( t_in_2[i] - t_in_1[i], u_in_2[i] - u_in_1[i] );
xy_entry_angle[i] = ut_entry_angle + rotation_angle_radians;
if( xy_entry_angle[i] < 0 )
xy_entry_angle[i] += TWO_PI;
// Rotate entry detector positions
float x_in = ( cosf( rotation_angle_radians ) * u_in_2[i] ) - ( sinf( rotation_angle_radians ) * t_in_2[i] );
float y_in = ( sinf( rotation_angle_radians ) * u_in_2[i] ) + ( cosf( rotation_angle_radians ) * t_in_2[i] );
// Determine if entry points should be rotated
bool entry_in_cone =
( (xy_entry_angle[i] > PI_OVER_4) && (xy_entry_angle[i] < THREE_PI_OVER_4) )
||
( (xy_entry_angle[i] > FIVE_PI_OVER_4) && (xy_entry_angle[i] < SEVEN_PI_OVER_4) );
// Rotate x_in & y_in by 90 degrees, if necessary
if( entry_in_cone )
{
x_temp = x_in;
y_temp = y_in;
x_in = -y_temp;
y_in = x_temp;
xy_entry_angle[i] += PI_OVER_2;
}
float m_in = tanf( xy_entry_angle[i] ); // proton entry path slope
float b_in = y_in - m_in * x_in; // proton entry path y-intercept
// Quadratic formula coefficients
a = 1 + pow(m_in, 2); // x^2 coefficient
b = 2 * m_in * b_in; // x coefficient
c = pow(b_in, 2) - pow(RECON_CYL_RADIUS, 2 ); // 1 coefficient
float entry_discriminant = pow(b, 2) - (4 * a * c); // Quadratic formula discriminant
bool entered = ( entry_discriminant > 0 ); // Proton path intersected twice
// Find both intersection points of the circle; closest one to the entry SSDs is the entry position
// Notice that x_intercept_2 = ( -b - sqrt(...) ) / ( 2 * a ) has the negative sign pulled out and following calculations modified as necessary
// e.g. x_intercept_2 = -x_real_2
// y_intercept_2 = -y_real_2
// squared_distance_2 = sqd_real_2 since (x_intercept_2 + x_in)^2 = (-x_intercept_2 - x_in)^2 = (x_real_2 - x_in)^2 (same for y term)
// This negation is also considered when assigning x_entry/y_entry using -x_intercept_2/y_intercept_2 *(TRUE/FALSE = 1/0)
if( entered )
{
x_intercept_1 = ( sqrtf(entry_discriminant) - b ) / ( 2 * a );
x_intercept_2 = ( sqrtf(entry_discriminant) + b ) / ( 2 * a );
y_intercept_1 = m_in * x_intercept_1 + b_in;
y_intercept_2 = m_in * x_intercept_2 - b_in;
squared_distance_1 = pow(x_intercept_1 - x_in, 2) + pow(y_intercept_1 - y_in, 2);
squared_distance_2 = pow(x_intercept_2 + x_in, 2) + pow(y_intercept_2 + y_in, 2);
x_entry[i] = x_intercept_1 * (squared_distance_1 <= squared_distance_2) - x_intercept_2 * (squared_distance_1 > squared_distance_2);
y_entry[i] = y_intercept_1 * (squared_distance_1 <= squared_distance_2) - y_intercept_2 * (squared_distance_1 > squared_distance_2);
}
// Unrotate by 90 degrees, if necessary
if( entry_in_cone )
{
x_temp = x_entry[i];
y_temp = y_entry[i];
x_entry[i] = y_temp;
y_entry[i] = -x_temp;
xy_entry_angle[i] -= PI_OVER_2;
}
/***************************************************************************************************************/
/****************************************** Check exit information *********************************************/
/***************************************************************************************************************/
// Repeat the procedure above, this time to determine if the proton path exited the reconstruction volume and if so, the
// x,y,z position where it exited
float ut_exit_angle = atan2f( t_out_2[i] - t_out_1[i], u_out_2[i] - u_out_1[i] );
xy_exit_angle[i] = ut_exit_angle + rotation_angle_radians;
if( xy_exit_angle[i] < 0 )
xy_exit_angle[i] += TWO_PI;
// Rotate exit detector positions
float x_out = ( cosf(rotation_angle_radians) * u_out_1[i] ) - ( sinf(rotation_angle_radians) * t_out_1[i] );
float y_out = ( sinf(rotation_angle_radians) * u_out_1[i] ) + ( cosf(rotation_angle_radians) * t_out_1[i] );
// Determine if exit points should be rotated
bool exit_in_cone =
( (xy_exit_angle[i] > PI_OVER_4) && (xy_exit_angle[i] < THREE_PI_OVER_4) )
||
( (xy_exit_angle[i] > FIVE_PI_OVER_4) && (xy_exit_angle[i] < SEVEN_PI_OVER_4) );
// Rotate x_out & y_out by 90 degrees, if necessary
if( exit_in_cone )
{
x_temp = x_out;
y_temp = y_out;
x_out = -y_temp;
y_out = x_temp;
xy_exit_angle[i] += PI_OVER_2;
}
float m_out = tanf( xy_exit_angle[i] );	// proton exit path slope
float b_out = y_out - m_out * x_out;	// proton exit path y-intercept
// Quadratic formula coefficients
a = 1 + pow(m_out, 2); // x^2 coefficient
b = 2 * m_out * b_out; // x coefficient
c = pow(b_out, 2) - pow(RECON_CYL_RADIUS, 2); // 1 coefficient
float exit_discriminant = pow(b, 2) - (4 * a * c); // Quadratic formula discriminant
bool exited = ( exit_discriminant > 0 ); // Proton path intersected twice
// Find both intersection points of the circle; closest one to the exit SSDs is the exit position
if( exited )
{
x_intercept_1 = ( sqrtf(exit_discriminant) - b ) / ( 2 * a );
x_intercept_2 = ( sqrtf(exit_discriminant) + b ) / ( 2 * a );// -x calculated
y_intercept_1 = m_out * x_intercept_1 + b_out;
y_intercept_2 = m_out * x_intercept_2 - b_out;// -y calculated
squared_distance_1 = pow(x_intercept_1 - x_out, 2) + pow(y_intercept_1 - y_out, 2);
squared_distance_2 = pow(x_intercept_2 + x_out, 2) + pow(y_intercept_2 + y_out, 2);// modified due to -x and -y calcs above
x_exit[i] = x_intercept_1 * (squared_distance_1 <= squared_distance_2) - x_intercept_2 * (squared_distance_1 > squared_distance_2);
y_exit[i] = y_intercept_1 * (squared_distance_1 <= squared_distance_2) - y_intercept_2 * (squared_distance_1 > squared_distance_2);
}
// Unrotate by 90 degrees, if necessary
if( exit_in_cone )
{
x_temp = x_exit[i];
y_temp = y_exit[i];
x_exit[i] = y_temp;
y_exit[i] = -x_temp;
xy_exit_angle[i] -= PI_OVER_2;
}
/***************************************************************************************************************/
/***************************************** Check z(v) direction ************************************************/
/***************************************************************************************************************/
// Relevant angles/slopes in radians for entry and exit in the uv plane
float uv_entry_slope = ( v_in_2[i] - v_in_1[i] ) / ( u_in_2[i] - u_in_1[i] );
float uv_exit_slope = ( v_out_2[i] - v_out_1[i] ) / ( u_out_2[i] - u_out_1[i] );
float uv_entry_angle = atan2( v_in_2[i] - v_in_1[i], u_in_2[i] - u_in_1[i] );
float uv_exit_angle = atan2( v_out_2[i] - v_out_1[i], u_out_2[i] - u_out_1[i] );
xz_entry_angle[i] = uv_entry_angle;
xz_exit_angle[i] = uv_exit_angle;
if( xz_entry_angle[i] < 0 )
xz_entry_angle[i] += TWO_PI;
if( xz_exit_angle[i] < 0 )
xz_exit_angle[i] += TWO_PI;
// Calculate the u coordinate for the entry and exit points of the reconstruction volume and then use the uv slope calculated
// from the detector entry and exit positions to determine the z position of the proton as it entered and exited the
// reconstruction volume
/*
u-coordinate of the entry and exit points of the reconstruction cylinder can be found using an inverse rotation
u = cos( gantry_angle ) * x + sin( gantry_angle ) * y
*/
float u_entry = ( cosf( rotation_angle_radians ) * x_entry[i] ) + ( sinf( rotation_angle_radians ) * y_entry[i] );
float u_exit = ( cosf(rotation_angle_radians) * x_exit[i] ) + ( sinf(rotation_angle_radians) * y_exit[i] );
z_entry[i] = v_in_2[i] + uv_entry_slope * ( u_entry - u_in_2[i] );
z_exit[i] = v_out_1[i] - uv_exit_slope * ( u_out_1[i] - u_exit );
// Even if the proton path intersected the circle describing the boundary of the cylinder twice, it may not have actually
// passed through the reconstruction volume or may have only passed through part way. If |z_entry|> RECON_CYL_HEIGHT/2 ,
// then something went wrong, since the source is near z = 0, and we do not want to use this history.  If the
// |z_entry| < RECON_CYL_HEIGHT/2 and |z_exit| > RECON_CYL_HEIGHT/2 then we want to use the history but the x_exit and
// y_exit positions need to be calculated again based on how far through the cylinder the proton passed before exiting it
if( entered && exited )
{
if( ( fabs(z_entry[i]) <= RECON_CYL_HEIGHT * 0.5 ) && ( fabs(z_exit[i]) > RECON_CYL_HEIGHT * 0.5 ) )
{
float recon_cyl_fraction = fabs( ( ( (z_exit[i] >= 0) - (z_exit[i] < 0) ) * RECON_CYL_HEIGHT * 0.5 - z_entry[i] ) / ( z_exit[i] - z_entry[i] ) );
x_exit[i] = x_entry[i] + recon_cyl_fraction * ( x_exit[i] - x_entry[i] );
y_exit[i] = y_entry[i] + recon_cyl_fraction * ( y_exit[i] - y_entry[i] );
z_exit[i] = ( (z_exit[i] >= 0) - (z_exit[i] < 0) ) * RECON_CYL_HEIGHT * 0.5;
}
else if( fabs(z_entry[i]) > RECON_CYL_HEIGHT * 0.5 )
{
entered = false;
exited = false;
}
// Check the measurement locations. Do not allow more than 5 cm difference in entry and exit in t and v. This gets
// rid of spurious events.
if( ( fabs(t_out_1[i] - t_in_2[i]) > 5 ) || ( fabs(v_out_1[i] - v_in_2[i]) > 5 ) )
{
entered = false;
exited = false;
}
}
relative_ut_angle[i] = ut_exit_angle - ut_entry_angle;
relative_uv_angle[i] = uv_exit_angle - uv_entry_angle;
// Proton passed through the reconstruction volume only if it both entered and exited the reconstruction cylinder
traversed_recon_volume[i] = entered && exited;
}
}
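// NOTE (illustrative only): the entry/exit coordinates computed by recon_volume_intersections_GPU come from intersecting the proton path
// y = m*x + b with the circle x^2 + y^2 = RECON_CYL_RADIUS^2 and keeping the root closest to the detector-side position. The host-side
// sketch below reproduces that algebra for a single line with both roots written out explicitly (the kernel instead folds the negation of
// the second root into its arithmetic). The function name and parameters are hypothetical and it is not called by the reconstruction.
bool line_circle_intersection( float m, float b, float radius, float x_ref, float y_ref, float& x_hit, float& y_hit )
{
	// Substituting y = m*x + b into x^2 + y^2 = radius^2 gives (m^2 + 1)*x^2 + 2*m*b*x + (b^2 - radius^2) = 0
	float A = m * m + 1;
	float B = 2 * m * b;
	float C = b * b - radius * radius;
	float discriminant = B * B - 4 * A * C;
	if( discriminant <= 0 )
		return false;	// the line misses (or merely grazes) the reconstruction cylinder
	float x1 = ( -B + sqrtf( discriminant ) ) / ( 2 * A );
	float x2 = ( -B - sqrtf( discriminant ) ) / ( 2 * A );
	float y1 = m * x1 + b;
	float y2 = m * x2 + b;
	// Keep the root closest to the reference point (the rotated detector coordinate) as the entry/exit position
	float d1 = ( x1 - x_ref ) * ( x1 - x_ref ) + ( y1 - y_ref ) * ( y1 - y_ref );
	float d2 = ( x2 - x_ref ) * ( x2 - x_ref ) + ( y2 - y_ref ) * ( y2 - y_ref );
	x_hit = ( d1 <= d2 ) ? x1 : x2;
	y_hit = ( d1 <= d2 ) ? y1 : y2;
	return true;
}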
void bin_valid_histories( const int num_histories )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
unsigned int mem_size_hist_bool = sizeof(bool) * num_histories;
traversed_recon_volume_h = (bool*) calloc( num_histories, sizeof(bool) );
bin_num_h = (int*) calloc( num_histories, sizeof(int) );
x_entry_h = (float*) calloc( num_histories, sizeof(float) );
y_entry_h = (float*) calloc( num_histories, sizeof(float) );
z_entry_h = (float*) calloc( num_histories, sizeof(float) );
x_exit_h = (float*) calloc( num_histories, sizeof(float) );
y_exit_h = (float*) calloc( num_histories, sizeof(float) );
z_exit_h = (float*) calloc( num_histories, sizeof(float) );
xy_entry_angle_h = (float*) calloc( num_histories, sizeof(float) );
xz_entry_angle_h = (float*) calloc( num_histories, sizeof(float) );
xy_exit_angle_h = (float*) calloc( num_histories, sizeof(float) );
xz_exit_angle_h = (float*) calloc( num_histories, sizeof(float) );
relative_ut_angle_h = (float*) calloc( num_histories, sizeof(float) );
relative_uv_angle_h = (float*) calloc( num_histories, sizeof(float) );
hipMalloc((void**) &bin_num_d, mem_size_hist_ints );
hipMemcpy( bin_num_d, bin_num_h, mem_size_hist_ints, hipMemcpyHostToDevice );
dim3 dimBlock( THREADS_PER_BLOCK );
dim3 dimGrid( (int)( num_histories/THREADS_PER_BLOCK ) + 1 );
hipLaunchKernelGGL(( bin_valid_histories_GPU), dim3(dimGrid), dim3(dimBlock), 0, 0,
num_histories, bin_counts_d, bin_num_d, traversed_recon_volume_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d,
mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d, WEPL_d,
xy_entry_angle_d, xz_entry_angle_d, xy_exit_angle_d, xz_exit_angle_d,
relative_ut_angle_d, relative_uv_angle_d
);
hipMemcpy( traversed_recon_volume_h, traversed_recon_volume_d, mem_size_hist_bool, hipMemcpyDeviceToHost );
hipMemcpy( bin_num_h, bin_num_d, mem_size_hist_ints, hipMemcpyDeviceToHost );
hipMemcpy( x_entry_h, x_entry_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( y_entry_h, y_entry_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( z_entry_h, z_entry_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( x_exit_h, x_exit_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( y_exit_h, y_exit_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( z_exit_h, z_exit_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( xy_entry_angle_h, xy_entry_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( xz_entry_angle_h, xz_entry_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( xy_exit_angle_h, xy_exit_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( xz_exit_angle_h, xz_exit_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( relative_ut_angle_h, relative_ut_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
hipMemcpy( relative_uv_angle_h, relative_uv_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost );
char data_filename[128];
if( WRITE_BIN_WEPLS )
{
sprintf(data_filename, "%s_%03d%s", "bin_numbers", gantry_angle_h[0], ".txt" );
write_array_to_disk( data_filename, output_directory, output_folder, bin_num_h, COLUMNS, ROWS, SLICES, num_histories, true );
}
int offset = 0;
for( int i = 0; i < num_histories; i++ )
{
if( traversed_recon_volume_h[i] && ( bin_num_h[i] >= 0 ) )
{
bin_num_vector.push_back( bin_num_h[i] );
//gantry_angle_vector.push_back( gantry_angle_h[i] );
WEPL_vector.push_back( WEPL_h[i] );
x_entry_vector.push_back( x_entry_h[i] );
y_entry_vector.push_back( y_entry_h[i] );
z_entry_vector.push_back( z_entry_h[i] );
x_exit_vector.push_back( x_exit_h[i] );
y_exit_vector.push_back( y_exit_h[i] );
z_exit_vector.push_back( z_exit_h[i] );
xy_entry_angle_vector.push_back( xy_entry_angle_h[i] );
xz_entry_angle_vector.push_back( xz_entry_angle_h[i] );
//xy_exit_angle_vector.push_back( xy_exit_angle_h[i] );
//xz_exit_angle_vector.push_back( xz_exit_angle_h[i] );
relative_ut_angle_vector.push_back( relative_ut_angle_h[i] );
relative_uv_angle_vector.push_back( relative_uv_angle_h[i] );
offset++;
recon_vol_histories++;
}
}
printf( "%d out of %d histories passed intersection cuts this iteration\n", offset, num_histories );
free( traversed_recon_volume_h );
free( bin_num_h );
free( x_entry_h );
free( y_entry_h );
free( z_entry_h );
free( x_exit_h );
free( y_exit_h );
free( z_exit_h );
free( xy_entry_angle_h );
free( xz_entry_angle_h );
free( xy_exit_angle_h );
free( xz_exit_angle_h );
free( relative_ut_angle_h );
free( relative_uv_angle_h );
//hipFree( bin_num_d );
hipFree( xy_entry_angle_d );
hipFree( xz_entry_angle_d );
hipFree( xy_exit_angle_d );
hipFree( xz_exit_angle_d );
hipFree( relative_ut_angle_d );
hipFree( relative_uv_angle_d );
}
__global__ void bin_valid_histories_GPU
(
int num_histories, int* bin_counts, int* bin_num, bool* traversed_recon_volume,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit,
float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle, float* WEPL,
float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle,
float* relative_ut_angle, float* relative_uv_angle
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( i < num_histories )
{
float x_midpath, y_midpath, z_midpath, path_angle;
int angle_bin, t_bin, v_bin;
float angle, t, v;
x_midpath = ( x_entry[i] + x_exit[i] ) / 2;
y_midpath = ( y_entry[i] + y_exit[i] ) / 2;
z_midpath = ( z_entry[i] + z_exit[i] ) / 2;
path_angle = atan2( ( y_exit[i] - y_entry[i] ) , ( x_exit[i] - x_entry[i] ) );
if( path_angle < 0 )
path_angle += 2*PI;
angle_bin = int( ( path_angle * RADIANS_TO_ANGLE / ANGULAR_BIN_SIZE ) + 0.5) % ANGULAR_BINS;
angle = angle_bin * ANGULAR_BIN_SIZE * ANGLE_TO_RADIANS;
t = y_midpath * cosf(angle) - x_midpath * sinf(angle);
t_bin = int( (t / T_BIN_SIZE ) + T_BINS/2);
v = z_midpath;
v_bin = int( (v / V_BIN_SIZE ) + V_BINS/2);
if( traversed_recon_volume[i] )
{
if( (t_bin >= 0) && (v_bin >= 0) && (t_bin < T_BINS) && (v_bin < V_BINS) )
{
bin_num[i] = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS;
atomicAdd( &bin_counts[bin_num[i]], 1 );
atomicAdd( &mean_WEPL[bin_num[i]], WEPL[i] );
atomicAdd( &mean_rel_ut_angle[bin_num[i]], relative_ut_angle[i] );
atomicAdd( &mean_rel_uv_angle[bin_num[i]], relative_uv_angle[i] );
}
else
bin_num[i] = -1;
}
}
}
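// NOTE (illustrative only): bin_valid_histories_GPU packs the three bin coordinates into a single flat index as
// bin = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS. The hedged helper below shows the inverse decomposition;
// its name is hypothetical and it is not used by the reconstruction.
void decompose_bin_index( int bin, int& t_bin, int& angle_bin, int& v_bin )
{
	v_bin = bin / ( T_BINS * ANGULAR_BINS );	// slowest-varying component
	angle_bin = ( bin / T_BINS ) % ANGULAR_BINS;	// middle component
	t_bin = bin % T_BINS;				// fastest-varying component
}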
/************************************************************************************************************************************************************/
/*************************************************************** Statistical Analysis and Cuts **************************************************************/
/************************************************************************************************************************************************************/
void calculate_means()
{
puts("Calculating the Mean for Each Bin Before Cuts...");
dim3 dimBlock( T_BINS );
dim3 dimGrid( V_BINS, ANGULAR_BINS );
hipLaunchKernelGGL(( calculate_means_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0,
bin_counts_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d
);
//hipMemcpy( bin_counts_h, bin_counts_d, MEM_SIZE_BINS_INTS, hipMemcpyDeviceToHost );
//hipMemcpy( mean_WEPL_h, mean_WEPL_d, MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost );
//hipMemcpy( mean_rel_ut_angle_h, mean_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost );
//hipMemcpy( mean_rel_uv_angle_h, mean_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost );
//write_array_to_disk("bin_counts_h_pre", output_directory, output_folder, bin_counts_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
//write_array_to_disk("mean_WEPL_h", output_directory, output_folder, mean_WEPL_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
//write_array_to_disk("mean_rel_ut_angle_h", output_directory, output_folder, mean_rel_ut_angle_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
//write_array_to_disk("mean_rel_uv_angle_h", output_directory, output_folder, mean_rel_uv_angle_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
free(bin_counts_h);
free(mean_WEPL_h);
free(mean_rel_ut_angle_h);
free(mean_rel_uv_angle_h);
}
__global__ void calculate_means_GPU( int* bin_counts, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle )
{
int v = blockIdx.x;
int angle = blockIdx.y;
int t = threadIdx.x;
int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS;
if( bin_counts[bin] > 0 )
{
mean_WEPL[bin] /= bin_counts[bin];
mean_rel_ut_angle[bin] /= bin_counts[bin];
mean_rel_uv_angle[bin] /= bin_counts[bin];
}
}
void sum_squared_deviations( const int start_position, const int num_histories )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
hipMalloc((void**) &bin_num_d, mem_size_hist_ints);
hipMalloc((void**) &WEPL_d, mem_size_hist_floats);
hipMalloc((void**) &xy_entry_angle_d, mem_size_hist_floats);
hipMalloc((void**) &xz_entry_angle_d, mem_size_hist_floats);
hipMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats);
hipMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats);
//hipMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats);
//hipMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats);
hipMalloc((void**) &relative_ut_angle_d, mem_size_hist_floats);
hipMalloc((void**) &relative_uv_angle_d, mem_size_hist_floats);
hipMemcpy( bin_num_d, &bin_num_vector[start_position], mem_size_hist_ints, hipMemcpyHostToDevice);
hipMemcpy( WEPL_d, &WEPL_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice);
hipMemcpy( xy_entry_angle_d, &xy_entry_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice);
hipMemcpy( xz_entry_angle_d, &xz_entry_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice);
//hipMemcpy( xy_exit_angle_d, &xy_exit_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice);
//hipMemcpy( xz_exit_angle_d, &xz_exit_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice);
hipMemcpy( relative_ut_angle_d, &relative_ut_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice);
hipMemcpy( relative_uv_angle_d, &relative_uv_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice);
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
hipLaunchKernelGGL(( sum_squared_deviations_GPU), dim3(dimGrid), dim3(dimBlock), 0, 0,
num_histories, bin_num_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d,
WEPL_d, xy_entry_angle_d, xz_entry_angle_d, xy_entry_angle_d, xz_entry_angle_d,//xy_exit_angle_d, xz_exit_angle_d,
stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d, relative_ut_angle_d, relative_uv_angle_d
);
hipFree( bin_num_d );
hipFree( WEPL_d );
hipFree( xy_entry_angle_d );
hipFree( xz_entry_angle_d );
//hipFree( xy_exit_angle_d );
//hipFree( xz_exit_angle_d );
hipFree( relative_ut_angle_d );
hipFree( relative_uv_angle_d );
}
__global__ void sum_squared_deviations_GPU
(
int num_histories, int* bin_num, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle,
float* WEPL, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle,
float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle, float* relative_ut_angle, float* relative_uv_angle
)
{
float WEPL_difference, rel_ut_angle_difference, rel_uv_angle_difference;
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( i < num_histories )
{
/* float ut_diff = xy_exit_angle[i] - xy_entry_angle[i];
if( fabs(ut_diff) > PI )
{
printf("Hello\n");
if( xy_entry_angle[i] > PI )
xy_entry_angle[i] -= TWO_PI;
if( xy_exit_angle[i] > PI )
xy_exit_angle[i] -= TWO_PI;
ut_diff = xy_exit_angle[i] - xy_entry_angle[i];
}
float uv_diff = xz_exit_angle[i] - xz_entry_angle[i];
if( fabs(uv_diff) > PI )
{
if( xz_entry_angle[i] > PI )
xz_entry_angle[i] -= TWO_PI;
if( xz_exit_angle[i] > PI )
xz_exit_angle[i] -= TWO_PI;
uv_diff = xz_exit_angle[i] - xz_entry_angle[i];
}*/
WEPL_difference = WEPL[i] - mean_WEPL[bin_num[i]];
rel_ut_angle_difference = relative_ut_angle[i] - mean_rel_ut_angle[bin_num[i]];
rel_uv_angle_difference = relative_uv_angle[i] - mean_rel_uv_angle[bin_num[i]];
//rel_ut_angle_difference = ut_diff - mean_rel_ut_angle[bin_num[i]];
//rel_uv_angle_difference = uv_diff - mean_rel_uv_angle[bin_num[i]];
atomicAdd( &stddev_WEPL[bin_num[i]], WEPL_difference * WEPL_difference);
atomicAdd( &stddev_rel_ut_angle[bin_num[i]], rel_ut_angle_difference * rel_ut_angle_difference );
atomicAdd( &stddev_rel_uv_angle[bin_num[i]], rel_uv_angle_difference * rel_uv_angle_difference );
}
}
void calculate_standard_deviations()
{
puts("Calculating standard deviations for each bin...");
dim3 dimBlock( T_BINS );
dim3 dimGrid( V_BINS, ANGULAR_BINS );
hipLaunchKernelGGL(( calculate_standard_deviations_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0,
bin_counts_d, stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d
);
//hipFree( bin_counts_d );
}
__global__ void calculate_standard_deviations_GPU( int* bin_counts, float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle )
{
int v = blockIdx.x, angle = blockIdx.y, t = threadIdx.x;
int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS;
if( bin_counts[bin] > 0 )
{
// SAMPLE_STD_DEV = true/false = 1/0 => std_dev = sqrt( SUM{i = 1 -> N} [ ( mu - x_i )^2 ] / ( N - 1/0 ) )
stddev_WEPL[bin] = sqrtf( stddev_WEPL[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
stddev_rel_ut_angle[bin] = sqrtf( stddev_rel_ut_angle[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
stddev_rel_uv_angle[bin] = sqrtf( stddev_rel_uv_angle[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
}
__syncthreads();
bin_counts[bin] = 0;
}
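// NOTE (illustrative only): taken together, calculate_means_GPU, sum_squared_deviations_GPU, and calculate_standard_deviations_GPU implement
// a two-pass mean/standard-deviation calculation for each bin. The hedged host-side sketch below performs the same computation for one bin's
// worth of values; the function name is hypothetical and 'sample' mirrors the SAMPLE_STD_DEV = 1 (sample) / 0 (population) convention above.
float two_pass_std_dev( const float* values, int N, bool sample )
{
	if( N <= 1 )
		return 0.0f;
	float mean = 0.0f;
	for( int i = 0; i < N; i++ )
		mean += values[i];
	mean /= N;	// first pass: bin mean
	float sum_squared_deviations = 0.0f;
	for( int i = 0; i < N; i++ )
		sum_squared_deviations += ( values[i] - mean ) * ( values[i] - mean );
	return sqrtf( sum_squared_deviations / ( N - int(sample) ) );	// second pass: (sample) standard deviation
}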
void statistical_cuts( const int start_position, const int num_histories )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
unsigned int mem_size_hist_bools = sizeof(bool) * num_histories;
passed_cuts_h = (bool*) calloc (num_histories, sizeof(bool) );
hipMalloc( (void**) &bin_num_d, mem_size_hist_ints );
hipMalloc( (void**) &WEPL_d, mem_size_hist_floats );
hipMalloc( (void**) &xy_entry_angle_d, mem_size_hist_floats );
hipMalloc( (void**) &xz_entry_angle_d, mem_size_hist_floats );
//hipMalloc( (void**) &xy_exit_angle_d, mem_size_hist_floats );
//hipMalloc( (void**) &xz_exit_angle_d, mem_size_hist_floats );
hipMalloc( (void**) &relative_ut_angle_d, mem_size_hist_floats );
hipMalloc( (void**) &relative_uv_angle_d, mem_size_hist_floats );
hipMalloc( (void**) &passed_cuts_d, mem_size_hist_bools );
hipMemcpy( bin_num_d, &bin_num_vector[start_position], mem_size_hist_ints, hipMemcpyHostToDevice );
hipMemcpy( WEPL_d, &WEPL_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
hipMemcpy( xy_entry_angle_d, &xy_entry_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
hipMemcpy( xz_entry_angle_d, &xz_entry_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
//hipMemcpy( xy_exit_angle_d, &xy_exit_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
//hipMemcpy( xz_exit_angle_d, &xz_exit_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
hipMemcpy( relative_ut_angle_d, &relative_ut_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
hipMemcpy( relative_uv_angle_d, &relative_uv_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
hipMemcpy( passed_cuts_d, passed_cuts_h, mem_size_hist_bools, hipMemcpyHostToDevice );
//puts("Before kernel");
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid( int( num_histories / THREADS_PER_BLOCK ) + 1 );
hipLaunchKernelGGL(( statistical_cuts_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0,
num_histories, bin_counts_d, bin_num_d, sinogram_d, WEPL_d,
xy_entry_angle_d, xz_entry_angle_d, xy_entry_angle_d, xz_entry_angle_d,//xy_exit_angle_d, xz_exit_angle_d,
mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d,
stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d,
passed_cuts_d, relative_ut_angle_d, relative_uv_angle_d
);
//puts("After kernel");
hipMemcpy( passed_cuts_h, passed_cuts_d, mem_size_hist_bools, hipMemcpyDeviceToHost);
//printf("start iteration %d\n", iteration );
for( int i = 0; i < num_histories; i++ )
{
if( passed_cuts_h[i] )
{
//printf("start i = %d\n", i );
//printf("index = %d\n", start_position + i );
bin_num_vector[post_cut_histories] = bin_num_vector[start_position + i];
//gantry_angle_vector[post_cut_histories] = gantry_angle_vector[start_position + i];
WEPL_vector[post_cut_histories] = WEPL_vector[start_position + i];
x_entry_vector[post_cut_histories] = x_entry_vector[start_position + i];
y_entry_vector[post_cut_histories] = y_entry_vector[start_position + i];
z_entry_vector[post_cut_histories] = z_entry_vector[start_position + i];
x_exit_vector[post_cut_histories] = x_exit_vector[start_position + i];
y_exit_vector[post_cut_histories] = y_exit_vector[start_position + i];
z_exit_vector[post_cut_histories] = z_exit_vector[start_position + i];
xy_entry_angle_vector[post_cut_histories] = xy_entry_angle_vector[start_position + i];
xz_entry_angle_vector[post_cut_histories] = xz_entry_angle_vector[start_position + i];
//xy_exit_angle_vector[post_cut_histories] = xy_exit_angle_vector[start_position + i];
//xz_exit_angle_vector[post_cut_histories] = xz_exit_angle_vector[start_position + i];
relative_ut_angle_vector[post_cut_histories] = relative_ut_angle_vector[start_position + i];
relative_uv_angle_vector[post_cut_histories] = relative_uv_angle_vector[start_position + i];
post_cut_histories++;
}
}
//printf("end iteration %d\n", iteration );
}
__global__ void statistical_cuts_GPU
(
int num_histories, int* bin_counts, int* bin_num, float* sinogram, float* WEPL,
float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle,
float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle,
float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle,
bool* passed_cuts, float* relative_ut_angle, float* relative_uv_angle
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( i < num_histories )
{
/*float ut_diff = xy_exit_angle[i] - xy_entry_angle[i];
if( ut_diff > PI )
{
if( xy_entry_angle[i] > PI )
xy_entry_angle[i] -= TWO_PI;
if( xy_exit_angle[i] > PI )
xy_exit_angle[i] -= TWO_PI;
ut_diff = xy_exit_angle[i] - xy_entry_angle[i];
}
float uv_diff = xz_exit_angle[i] - xz_entry_angle[i];
if( uv_diff > PI )
{
if( xz_entry_angle[i] > PI )
xz_entry_angle[i] -= TWO_PI;
if( xz_exit_angle[i] > PI )
xz_exit_angle[i] -= TWO_PI;
uv_diff = xz_exit_angle[i] - xz_entry_angle[i];
}*/
bool passed_ut_cut = ( fabs( relative_ut_angle[i] - mean_rel_ut_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_ut_angle[bin_num[i]] ) );
bool passed_uv_cut = ( fabs( relative_uv_angle[i] - mean_rel_uv_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_uv_angle[bin_num[i]] ) );
/*bool passed_ut_cut = ( fabs( ut_diff - mean_rel_ut_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_ut_angle[bin_num[i]] ) );
bool passed_uv_cut = ( fabs( uv_diff - mean_rel_uv_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_uv_angle[bin_num[i]] ) );*/
bool passed_WEPL_cut = ( fabs( mean_WEPL[bin_num[i]] - WEPL[i] ) <= ( SIGMAS_TO_KEEP * stddev_WEPL[bin_num[i]] ) );
passed_cuts[i] = passed_ut_cut && passed_uv_cut && passed_WEPL_cut;
if( passed_cuts[i] )
{
atomicAdd( &sinogram[bin_num[i]], WEPL[i] );
atomicAdd( &bin_counts[bin_num[i]], 1 );
}
}
}
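// NOTE (illustrative only): statistical_cuts_GPU keeps a history only if its WEPL and relative ut/uv angles all lie within SIGMAS_TO_KEEP
// standard deviations of their bin means. The hedged predicate below isolates that per-variable test; its name and the explicit
// sigmas_to_keep parameter are hypothetical, and it uses <= throughout, whereas the kernel above uses a strict < for the angular cuts.
inline bool within_sigma_cut( float value, float bin_mean, float bin_std_dev, float sigmas_to_keep )
{
	return fabs( value - bin_mean ) <= sigmas_to_keep * bin_std_dev;
}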
/************************************************************************************************************************************************************/
/*********************************************************************** MLP ********************************************************************************/
/************************************************************************************************************************************************************/
void create_MLP_test_image()
{
double x, y;
// Create the MLP test image and initialize it to zeros
MLP_test_image_h = (int*)calloc( MLP_IMAGE_VOXELS, sizeof(int));
for( int slice = 0; slice < MLP_IMAGE_SLICES; slice++ )
{
for( int row = 0; row < MLP_IMAGE_ROWS; row++ )
{
for( int column = 0; column < MLP_IMAGE_COLUMNS; column++ )
{
x = ( column - MLP_IMAGE_COLUMNS/2 + 0.5) * MLP_IMAGE_VOXEL_WIDTH;
y = ( MLP_IMAGE_ROWS/2 - row - 0.5 ) * MLP_IMAGE_VOXEL_HEIGHT;
if( pow( x, 2 ) + pow( y, 2 ) <= pow( double(MLP_IMAGE_RECON_CYL_RADIUS), 2) )
MLP_test_image_h[slice * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS + row * MLP_IMAGE_COLUMNS + column] = 1;
if( pow( x / MLP_PHANTOM_A, 2 ) + pow( y / MLP_PHANTOM_B, 2 ) <= 1 )
MLP_test_image_h[slice * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS + row * MLP_IMAGE_COLUMNS + column] = 8;
}
}
}
}
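// NOTE (illustrative only): create_MLP_test_image places (x, y) at each voxel's center, with columns increasing toward +x and rows increasing
// toward -y. The hedged helper below isolates that mapping; its name is hypothetical and it is not called elsewhere.
void MLP_voxel_center( int row, int column, double& x, double& y )
{
	x = ( column - MLP_IMAGE_COLUMNS/2 + 0.5 ) * MLP_IMAGE_VOXEL_WIDTH;	// half-voxel offset puts x at the column center
	y = ( MLP_IMAGE_ROWS/2 - row - 0.5 ) * MLP_IMAGE_VOXEL_HEIGHT;		// rows count downward, so y decreases as row increases
}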
//void MLP_entry_exit
//(
// int*& image, bool entry,
// float x_start, float y_start, float z_start,
// float xy_angle, float xz_angle,
// float x_object, float y_object, float z_object
//)
//{
// /********************************************************************************************/
// /********************************* Voxel Walk Parameters ************************************/
// /********************************************************************************************/
// int x_move_direction, y_move_direction, z_move_direction;
// int x_voxel_step, y_voxel_step, z_voxel_step;
// float delta_x, delta_y, delta_z;
// float x_move, y_move, z_move;
// /********************************************************************************************/
// /**************************** Status Tracking Information ***********************************/
// /********************************************************************************************/
// float x, y, z;
// float x_inside, y_inside, z_inside;
// float x_to_go, y_to_go, z_to_go;
// float x_extension, y_extension;
// float voxel_x, voxel_y, voxel_z;
// float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out;
// int voxel;
// bool outside_image, end_walk;
// /********************************************************************************************/
// /************************** Initial and Boundary Conditions *********************************/
// /********************************************************************************************/
// // Initial Distance Into Voxel
// x_inside = modf( ( x_start + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
// y_inside = modf( ( RECON_CYL_RADIUS - y_entry ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
// z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
//
// voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
// voxel_x_out = int( ( x_exit + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
// voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit ) /VOXEL_HEIGHT );
// voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit ) /VOXEL_THICKNESS );
// voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS);
// /********************************************************************************************/
// /***************************** Path and Walk Information ************************************/
// /********************************************************************************************/
// // Lengths/Distances as x is Incremented One Voxel
// delta_x = VOXEL_WIDTH;
// delta_y = abs( (y_exit - y_entry)/(x_exit - x_start) * VOXEL_WIDTH );
// delta_z = abs( (z_exit - z_entry)/(x_exit - x_start) * VOXEL_WIDTH );
// // Overwrite NaN if Divisors on delta_i Calculations Above
// if( x_start == x_exit )
// {
// delta_x = abs( (x_exit - x_entry)/(y_exit - y_entry) * VOXEL_HEIGHT );
// delta_y = VOXEL_HEIGHT;
// delta_z = abs( (z_exit - z_entry)/(y_exit - y_entry) * VOXEL_HEIGHT );
// if( y_entry == y_exit )
// {
// delta_x = abs( (x_exit - x_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );
// delta_y = abs( (y_exit - y_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );;
// delta_z = VOXEL_THICKNESS;
// }
// }
// x_move = 0, y_move = 0, z_move = 0;
// /*x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit );
// y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit );
// z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit );*/
// x_move_direction = ( cosf(xy_angle) >= 0 ) - ( cosf(xy_angle) < 0 );
// y_move_direction = ( sinf(xy_angle) >= 0 ) - ( sinf(xy_angle) < 0 );
// z_move_direction = ( sinf(xz_angle) >= 0 ) - ( sinf(xz_angle) < 0 );
// x_voxel_step = x_move_direction;
// y_voxel_step = -y_move_direction;
// z_voxel_step = -z_move_direction;
// /********************************************************************************************/
// /**************************** Status Tracking Information ***********************************/
// /********************************************************************************************/
// x = x_entry, y = y_entry, z = z_entry;
// x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside;
// y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside;
// z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside;
//
// outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
// if( !outside_image )
// image[voxel] = 0;
// end_walk = ( voxel == voxel_out ) || outside_image;
// //fgets(user_response, sizeof(user_response), stdin);
// /********************************************************************************************/
// /*********************************** Voxel Walk Routine *************************************/
// /********************************************************************************************/
// if( z_entry != z_exit )
// {
// while( !end_walk )
// {
// // Change in z for Move to Voxel Edge in x and y
// x_extension = delta_z/delta_x * x_to_go;
// y_extension = delta_z/delta_y * y_to_go;
// if( z_to_go <= x_extension && z_to_go <= y_extension )
// {
// //printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
// x_move = delta_x / delta_z * z_to_go;
// y_move = delta_y / delta_z * z_to_go;
// z_move = z_to_go;
// x_to_go -= x_move;
// y_to_go -= y_move;
// z_to_go = VOXEL_THICKNESS;
// voxel_z += z_voxel_step;
// if( x_to_go == 0 )
// {
// voxel_x += x_voxel_step;
// x_to_go = VOXEL_WIDTH;
// }
// if( y_to_go == 0 )
// {
// voxel_y += y_voxel_step;
// y_to_go = VOXEL_HEIGHT;
// }
// }
// //If Next Voxel Edge is in x or xy Diagonal
// else if( x_extension <= y_extension )
// {
// //printf(" x_extension <= y_extension \n");
// x_move = x_to_go;
// y_move = delta_y / delta_x * x_to_go;
// z_move = delta_z / delta_x * x_to_go;
// x_to_go = VOXEL_WIDTH;
// y_to_go -= y_move;
// z_to_go -= z_move;
// voxel_x += x_voxel_step;
// if( y_to_go == 0 )
// {
// y_to_go = VOXEL_HEIGHT;
// voxel_y += y_voxel_step;
// }
// }
// // Else Next Voxel Edge is in y
// else
// {
// //printf(" y_extension < x_extension \n");
// x_move = delta_x / delta_y * y_to_go;
// y_move = y_to_go;
// z_move = delta_z / delta_y * y_to_go;
// x_to_go -= x_move;
// y_to_go = VOXEL_HEIGHT;
// z_to_go -= z_move;
// voxel_y += y_voxel_step;
// }
// x += x_move_direction * x_move;
// y += y_move_direction * y_move;
// z += z_move_direction * z_move;
// //fgets(user_response, sizeof(user_response), stdin);
// voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
// outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
// if( !outside_image )
// image[voxel] = 0;
// end_walk = ( voxel == voxel_out ) || outside_image;
// }
// }
// else
// {
// //printf("z_exit == z_entry\n");
// while( !end_walk )
// {
// // Change in x for Move to Voxel Edge in y
// y_extension = delta_x/delta_y * y_to_go;
// //If Next Voxel Edge is in x or xy Diagonal
// if( x_to_go <= y_extension )
// {
// //printf(" x_to_go <= y_extension \n");
// x_move = x_to_go;
// y_move = delta_y / delta_x * x_to_go;
// x_to_go = VOXEL_WIDTH;
// y_to_go -= y_move;
// voxel_x += x_voxel_step;
// if( y_to_go == 0 )
// {
// y_to_go = VOXEL_HEIGHT;
// voxel_y += y_voxel_step;
// }
// }
// // Else Next Voxel Edge is in y
// else
// {
// //printf(" y_extension < x_extension \n");
// x_move = delta_x / delta_y * y_to_go;
// y_move = y_to_go;
// x_to_go -= x_move;
// y_to_go = VOXEL_HEIGHT;
// voxel_y += y_voxel_step;
// }
// x += x_move_direction * x_move;
// y += y_move_direction * y_move;
// voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
// outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
// if( !outside_image )
// image[voxel] = 0;
// end_walk = ( voxel == voxel_out ) || outside_image;
// //fgets(user_response, sizeof(user_response), stdin);
// }// end: while( !end_walk )
// }//end: else: z_entry_h != z_exit_h => z_entry_h == z_exit_h
//}
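// NOTE (illustrative only): MLP_test below (and the commented-out MLP_entry_exit above) advance through the image with a Siddon-style voxel
// walk: compare the remaining distance to the next x boundary against the x-distance equivalent of the next y boundary, step across whichever
// boundary is reached first, and reset that axis's remaining distance to a full voxel. The hedged 2D sketch below isolates one such step;
// the function name is hypothetical and the position update (x_move/y_move) is omitted for brevity.
void voxel_walk_step_2D( float delta_x, float delta_y, float& x_to_go, float& y_to_go,
	int& voxel_x, int& voxel_y, int x_voxel_step, int y_voxel_step, float voxel_width, float voxel_height )
{
	// x-distance that would be traversed before reaching the next y boundary
	float y_extension = delta_x / delta_y * y_to_go;
	if( x_to_go <= y_extension )
	{
		// The x boundary (or the xy corner) is reached first
		y_to_go -= delta_y / delta_x * x_to_go;
		x_to_go = voxel_width;
		voxel_x += x_voxel_step;
		if( y_to_go == 0 )
		{
			y_to_go = voxel_height;
			voxel_y += y_voxel_step;
		}
	}
	else
	{
		// The y boundary is reached first
		x_to_go -= delta_x / delta_y * y_to_go;
		y_to_go = voxel_height;
		voxel_y += y_voxel_step;
	}
}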
void MLP_test()
{
char user_response[20];
float x_entry = -3.0;
float y_entry = -sqrtf( pow(MLP_IMAGE_RECON_CYL_RADIUS, 2) - pow(x_entry,2) );
float z_entry = 0.0;
float x_exit = 2.5;
float y_exit = sqrtf( pow(MLP_IMAGE_RECON_CYL_RADIUS, 2) - pow(x_exit,2) );
float z_exit = 0.0;
float xy_entry_angle = 25 * PI/180, xz_entry_angle = 0.0;
float xy_exit_angle = 45* PI/180, xz_exit_angle = 0.0;
float x_in_object, y_in_object, z_in_object;
float u_in_object, t_in_object, v_in_object;
float x_out_object, y_out_object, z_out_object;
float u_out_object, t_out_object, v_out_object;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
float voxel_x, voxel_y, voxel_z;
int voxel;
int x_move_direction, y_move_direction, z_move_direction;
int x_voxel_step, y_voxel_step, z_voxel_step;
float x, y, z;
float x_inside, y_inside, z_inside;
float x_to_go, y_to_go, z_to_go;
float delta_x, delta_y, delta_z;
float x_extension, y_extension;
float x_move, y_move, z_move;
bool end_walk, outside_image;
bool entered_object = false, exited_object = false;
/********************************************************************************************************/
/******************** Determine if and Where the Proton Enters the Actual Object ************************/
/********************************************************************************************************/
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_entry + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH;
y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_entry ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT;
z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_entry ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS;
//printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
//printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside);
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
//printf("voxel = %d \n", voxel );
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = MLP_IMAGE_VOXEL_WIDTH;
delta_y = tanf( xy_entry_angle ) * MLP_IMAGE_VOXEL_WIDTH;
delta_z = tanf( xz_entry_angle ) * MLP_IMAGE_VOXEL_WIDTH;
if( x_entry == x_exit )
{
delta_x = 0;
delta_y = MLP_IMAGE_VOXEL_HEIGHT;
delta_z = tanf(xz_entry_angle) / tanf(xy_entry_angle) * MLP_IMAGE_VOXEL_HEIGHT;
if( y_entry == y_exit )
{
delta_x = 0;
delta_y = 0;
delta_z = MLP_IMAGE_VOXEL_THICKNESS;
}
}
//printf("delta_x = %3f delta_y = %3f delta_z = %3f\n", delta_x, delta_y, delta_z );
x_move = 0, y_move = 0, z_move = 0;
/*x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit );
y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit );
z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit );*/
x_move_direction = ( cosf(xy_entry_angle) >= 0 ) - ( cosf(xy_entry_angle) < 0 );
y_move_direction = ( sinf(xy_entry_angle) >= 0 ) - ( sinf(xy_entry_angle) < 0 );
z_move_direction = ( sinf(xz_entry_angle) >= 0 ) - ( sinf(xz_entry_angle) < 0 );	// z stepping follows the xz (uv) plane angle, not the xy angle
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_entry, y = y_entry, z = z_entry;
x_to_go = ( x_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_WIDTH - x_inside ) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_HEIGHT - y_inside ) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_THICKNESS - z_inside ) + ( z_voxel_step <= 0 ) * z_inside;
//printf("initial values:\n\tx_to_go = %3f\n\ty_to_go = %3f\n\tz_to_go = %3f\n", x_to_go, y_to_go, z_to_go);
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
entered_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
end_walk = entered_object || outside_image;
///********************************************************************************************/
///*********************************** Voxel Walk Routine *************************************/
///********************************************************************************************/
if( z_entry != z_exit )
{
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = MLP_IMAGE_VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
entered_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
end_walk = entered_object || outside_image;
}
}
else
{
//printf("z_exit == z_entry\n");
while( !end_walk )
{
//printf("beginning of loop\n\n");
//printf("x = %3f y = %3f z = %3f\n", x, y, z );
//printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go);
//printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//printf("y_extension = %3f\n", y_extension);
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
//printf("end of loop\n\n");
//printf("x_move = %3f y_move = %3f\n", x_move, y_move );
//printf("x = %3f y = %3f z = %3f\n", x, y, z );
//printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go);
//printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n\n", voxel_x, voxel_y, voxel_z);
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
entered_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
//printf("MLP_IMAGE_WIDTH/2 = %3f\n MLP_IMAGE_HEIGHT/2 = %3f", MLP_IMAGE_WIDTH/2 , MLP_IMAGE_HEIGHT/2 );
x += x_move_direction * x_move;
y += y_move_direction * y_move;
end_walk = entered_object || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
}//end: else: z_entry != z_exit => z_entry == z_exit
if( entered_object )
{
x_in_object = x;
y_in_object = y;
z_in_object = z;
}
/********************************************************************************************************/
/******************** Determine if and Where the Proton Exited the Actual Object ************************/
/********************************************************************************************************/
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_exit + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH;
y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_exit ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT;
z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_exit ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS;
//printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
//printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside);
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
//printf("voxel = %d \n", voxel );
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = MLP_IMAGE_VOXEL_WIDTH;
delta_y = tanf( xy_exit_angle ) * MLP_IMAGE_VOXEL_WIDTH;
delta_z = tanf( xz_exit_angle ) * MLP_IMAGE_VOXEL_WIDTH;
if( x_entry == x_exit )
{
delta_x = 0;
delta_y = MLP_IMAGE_VOXEL_HEIGHT;
delta_z = tanf(xz_exit_angle) / tanf(xy_exit_angle) * MLP_IMAGE_VOXEL_HEIGHT;
if( y_entry == y_exit )
{
delta_x = 0;
delta_y = 0;
delta_z = MLP_IMAGE_VOXEL_THICKNESS;
}
}
//printf("delta_x = %3f delta_y = %3f delta_z = %3f\n", delta_x, delta_y, delta_z );
x_move = 0, y_move = 0, z_move = 0;
//x_move_direction = ( x_exit <= x_entry ) - ( x_exit > x_entry );
//y_move_direction = ( y_exit <= y_entry ) - ( y_exit > y_entry );
//z_move_direction = ( z_exit <= z_entry ) - ( z_exit > z_entry );
x_move_direction = ( cosf(xy_exit_angle) < 0 ) - ( cosf(xy_exit_angle) >= 0 );
y_move_direction = ( sinf(xy_exit_angle) < 0 ) - ( sinf(xy_exit_angle) >= 0 );
z_move_direction = ( sinf(xz_exit_angle) < 0 ) - ( sinf(xz_exit_angle) >= 0 );	// z stepping follows the xz (uv) plane angle, not the xy angle
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_exit, y = y_exit, z = z_exit;
x_to_go = ( x_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_WIDTH - x_inside ) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_HEIGHT - y_inside ) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_THICKNESS - z_inside ) + ( z_voxel_step <= 0 ) * z_inside;
//printf("initial values:\n\tx_to_go = %3f\n\ty_to_go = %3f\n\tz_to_go = %3f\n", x_to_go, y_to_go, z_to_go);
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
exited_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
end_walk = exited_object || outside_image;
///********************************************************************************************/
///*********************************** Voxel Walk Routine *************************************/
///********************************************************************************************/
if( z_entry != z_exit )
{
//printf("z_entry != z_exit\n");
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = MLP_IMAGE_VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
voxel = int( voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS );
outside_image = ( voxel_x >= MLP_IMAGE_COLUMNS ) || ( voxel_y >= MLP_IMAGE_ROWS ) || ( voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
exited_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
end_walk = exited_object || outside_image;
}
}
else
{
//printf("z_entry == z_exit\n");
while( !end_walk )
{
//printf("beginning of loop\n\n");
//printf("x = %3f y = %3f z = %3f\n", x, y, z );
//printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go);
//printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//printf("y_extension = %3f\n", y_extension);
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
/*printf("end of loop\n\n");
printf("x_move = %3f y_move = %3f\n", x_move, y_move );
printf("x = %3f y = %3f z = %3f\n", x, y, z );
printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go);
printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n\n", voxel_x, voxel_y, voxel_z);*/
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
exited_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
//printf("MLP_IMAGE_WIDTH/2 = %3f\n MLP_IMAGE_HEIGHT/2 = %3f",MLP_IMAGE_WIDTH/2 , MLP_IMAGE_HEIGHT/2 );
x += x_move_direction * x_move;
y += y_move_direction * y_move;
end_walk = exited_object || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
	}//end: else: z_entry != z_exit => z_entry == z_exit
if( exited_object )
{
x_out_object = x;
y_out_object = y;
z_out_object = z;
}
x_inside = modf( ( x_in_object + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH;
y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_in_object ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT;
z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_in_object ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS;
//printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
//printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside);
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
int path[1000];
int path_index = 0;
double chord_lengths[1000];
MLP_test_image_h[voxel] = 0;
path[path_index++] = voxel;
u_in_object = ( cosf( xy_entry_angle ) * x_in_object ) + ( sinf( xy_entry_angle ) * y_in_object );
u_out_object = ( cosf( xy_entry_angle ) * x_out_object ) + ( sinf( xy_entry_angle ) * y_out_object );
t_in_object = ( cosf( xy_entry_angle ) * y_in_object ) - ( sinf( xy_entry_angle ) * x_in_object );
t_out_object = ( cosf( xy_entry_angle ) * y_out_object ) - ( sinf( xy_entry_angle ) * x_out_object );
v_in_object = z_in_object;
v_out_object = z_out_object;
double T_0[2] = { t_in_object, 0 };
double T_2[2] = { t_out_object, xy_exit_angle - xy_entry_angle };
double V_0[2] = { v_in_object, xz_entry_angle };
double V_2[2] = { v_out_object, xz_exit_angle };
double u_2 = abs(u_out_object - u_in_object);
double u_0 = 0, u_1 = MLP_u_step;
double t_1_previous, v_1_previous;
double x_1_previous = x, y_1_previous = y, z_1_previous = z;
int voxel_x_previous = voxel_x;
int voxel_y_previous = voxel_y;
int voxel_z_previous = voxel_z;
int voxel_previous = voxel;
int voxels_passed;
double chord_segment;
double chord_fraction;
double x_to_edge, y_to_edge, z_to_edge;
//fgets(user_response, sizeof(user_response), stdin);
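	// Each pass of the loop below advances the reconstruction depth u_1 by MLP_u_step and estimates the most
	// likely lateral position of the proton at that depth.  It builds the scattering covariance matrices
	// Sigma_1 (entry plane to u_1) and Sigma_2 (u_1 to exit plane) from the polynomial moments A_0..A_5,
	// inverts them, and evaluates what appears to be the standard closed-form MLP estimate
	//     [t_1, theta_1]^T = ( Sigma_1^-1 + R_1^T Sigma_2^-1 R_1 )^-1 ( Sigma_1^-1 R_0 [t_0, theta_0]^T + R_1^T Sigma_2^-1 [t_2, theta_2]^T )
	// in the lateral (t) direction, and the analogous estimate in the vertical (v) direction using V_0 and V_2.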
while( u_1 <= u_2 - MLP_u_step )
{
double R_0[4] = { 1.0, u_1 - u_0, 0.0 , 1.0}; //a,b,c,d
double R_0T[4] = { 1.0, 0.0, u_1 - u_0 , 1.0}; //a,c,b,d
double R_1[4] = { 1.0, u_2 - u_1, 0.0 , 1.0}; //a,b,c,d
double R_1T[4] = { 1.0, 0.0, u_2 - u_1 , 1.0}; //a,c,b,d
double sigma_1_coefficient = pow( E_0 * ( 1 + 0.038 * log( (u_1 - u_0)/X_0) ), 2.0 ) / X_0;
float sigma_t1 = (A_0/3)*pow(u_1, 3.0) + (A_1/12)*pow(u_1, 4.0) + (A_2/30)*pow(u_1, 5.0) + (A_3/60)*pow(u_1, 6.0) + (A_4/105)*pow(u_1, 7.0) + (A_5/168)*pow(u_1, 8.0);
float sigma_t1_theta1 = pow(u_1, 2.0 )*( (A_0/2) + (A_1/6)*u_1 + (A_2/12)*pow(u_1, 2.0) + (A_3/20)*pow(u_1, 3.0) + (A_4/30)*pow(u_1, 4.0) + (A_5/42)*pow(u_1, 5.0) );
float sigma_theta1 = A_0*u_1 + (A_1/2)*pow(u_1, 2.0) + (A_2/3)*pow(u_1, 3.0) + (A_3/4)*pow(u_1, 4.0) + (A_4/5)*pow(u_1, 5.0) + (A_5/6)*pow(u_1, 6.0);
double determinant_Sigma_1 = sigma_t1 * sigma_theta1 - pow( sigma_t1_theta1, 2 );//ad-bc
double Sigma_1I[4] = // Sigma_1 Inverse = [1/det(Sigma_1)]*{ d, -b, -c, a }
{
sigma_theta1 / determinant_Sigma_1,
-sigma_t1_theta1 / determinant_Sigma_1,
-sigma_t1_theta1 / determinant_Sigma_1,
sigma_t1 / determinant_Sigma_1
};
double sigma_2_coefficient = pow( E_0 * ( 1 + 0.038 * log( (u_2 - u_1)/X_0 ) ), 2.0 ) / X_0;
double sigma_t2 = (A_0/3)*pow(u_2, 3.0) + (A_1/12)*pow(u_2, 4.0) + (A_2/30)*pow(u_2, 5.0) + (A_3/60)*pow(u_2, 6.0) + (A_4/105)*pow(u_2, 7.0) + (A_5/168)*pow(u_2, 8.0)
- (A_0/3)*pow(u_1, 3.0) - (A_1/4)*pow(u_1, 4.0) - (A_2/5)*pow(u_1, 5.0) - (A_3/6)*pow(u_1, 6.0) - (A_4/7)*pow(u_1, 7.0) - (A_5/8)*pow(u_1, 8.0)
+ 2*u_2*( (A_0/2)*pow(u_1, 2.0) + (A_1/3)*pow(u_1, 3.0) + (A_2/4)*pow(u_1, 4.0) + (A_3/5)*pow(u_1, 5.0) + (A_4/6)*pow(u_1, 6.0) + (A_5/7)*pow(u_1, 7.0) )
- pow(u_2, 2.0) * ( A_0*u_1 + (A_1/2)*pow(u_1, 2.0) + (A_2/3)*pow(u_1, 3.0) + (A_3/4)*pow(u_1, 4.0) + (A_4/5)*pow(u_1, 5.0) + (A_5/6)*pow(u_1, 6.0) );
double sigma_t2_theta2 = pow(u_2, 2.0 )*( (A_0/2) + (A_1/6)*u_2 + (A_2/12)*pow(u_2, 2.0) + (A_3/20)*pow(u_2, 3.0) + (A_4/30)*pow(u_2, 4.0) + (A_5/42)*pow(u_2, 5.0) )
- u_2*u_1*( A_0 + (A_1/2)*u_1 + (A_2/3)*pow(u_1, 2.0) + (A_3/4)*pow(u_1, 3.0) + (A_4/5)*pow(u_1, 4.0) + (A_5/6)*pow(u_1, 5.0) )
+ pow(u_1, 2.0 )*( (A_0/2) + (A_1/3)*u_1 + (A_2/4)*pow(u_1, 2.0) + (A_3/5)*pow(u_1, 3.0) + (A_4/6)*pow(u_1, 4.0) + (A_5/7)*pow(u_1, 5.0) );
double sigma_theta2 = A_0 * ( u_2 - u_1 ) + ( A_1 / 2 ) * ( pow(u_2, 2.0) - pow(u_1, 2.0) ) + ( A_2 / 3 ) * ( pow(u_2, 3.0) - pow(u_1, 3.0) )
+ ( A_3 / 4 ) * ( pow(u_2, 4.0) - pow(u_1, 4.0) ) + ( A_4 / 5 ) * ( pow(u_2, 5.0) - pow(u_1, 5.0) ) + ( A_5 /6 )*( pow(u_2, 6.0) - pow(u_1, 6.0) );
double determinant_Sigma_2 = sigma_t2 * sigma_theta2 - pow( sigma_t2_theta2, 2 );//ad-bc
double Sigma_2I[4] = // Sigma_2 Inverse = [1/det(Sigma_2)]*{ d, -b, -c, a }
{
sigma_theta2 / determinant_Sigma_2,
-sigma_t2_theta2 / determinant_Sigma_2,
-sigma_t2_theta2 / determinant_Sigma_2,
sigma_t2 / determinant_Sigma_2
};
double first_term[4] =
{
Sigma_1I[0] + R_1T[0] * ( Sigma_2I[0] * R_1[0] + Sigma_2I[1] * R_1[2] ) + R_1T[1] * ( Sigma_2I[2] * R_1[0] + Sigma_2I[3] * R_1[2] ),
Sigma_1I[1] + R_1T[0] * ( Sigma_2I[0] * R_1[1] + Sigma_2I[1] * R_1[3] ) + R_1T[1] * ( Sigma_2I[2] * R_1[1] + Sigma_2I[3] * R_1[3] ),
Sigma_1I[2] + R_1T[2] * ( Sigma_2I[0] * R_1[0] + Sigma_2I[1] * R_1[2] ) + R_1T[3] * ( Sigma_2I[2] * R_1[0] + Sigma_2I[3] * R_1[2] ),
Sigma_1I[3] + R_1T[2] * ( Sigma_2I[0] * R_1[1] + Sigma_2I[1] * R_1[3] ) + R_1T[3] * ( Sigma_2I[2] * R_1[1] + Sigma_2I[3] * R_1[3] )
};
double determinant_first_term = first_term[0] * first_term[3] - first_term[1] * first_term[2];
		// Invert first_term in place; element a must be saved before it is overwritten so that d is computed from the original value
		double first_term_a = first_term[0];
		first_term[0] = first_term[3] / determinant_first_term;
		first_term[1] = -first_term[1] / determinant_first_term;
		first_term[2] = -first_term[2] / determinant_first_term;
		first_term[3] = first_term_a / determinant_first_term;
double second_term[2] =
{
Sigma_1I[0] * ( R_0[0] * T_0[0] + R_0[1] * T_0[1] )
+ Sigma_1I[1] * ( R_0[2] * T_0[0] + R_0[3] * T_0[1] )
+ R_1T[0] * ( Sigma_2I[0] * T_2[0] + Sigma_2I[1] * T_2[1] )
+ R_1T[1] * ( Sigma_2I[2] * T_2[0] + Sigma_2I[3] * T_2[1] )
,
Sigma_1I[2] * ( R_0[0] * T_0[0] + R_0[1] * T_0[1] )
+ Sigma_1I[3] * ( R_0[2] * T_0[0] + R_0[3] * T_0[1] )
+ R_1T[2] * ( Sigma_2I[0] * T_2[0] + Sigma_2I[1] * T_2[1] )
+ R_1T[3] * ( Sigma_2I[2] * T_2[0] + Sigma_2I[3] * T_2[1] )
};
double t_1 = first_term[0] * second_term[0] + first_term[1] * second_term[1];
double theta_1 = first_term[2] * second_term[0] + first_term[3] * second_term[1];
// Do v MLP Now
second_term[0] = Sigma_1I[0] * ( R_0[0] * V_0[0] + R_0[1] * V_0[1] )
+ Sigma_1I[1] * ( R_0[2] * V_0[0] + R_0[3] * V_0[1] )
+ R_1T[0] * ( Sigma_2I[0] * V_2[0] + Sigma_2I[1] * V_2[1] )
+ R_1T[1] * ( Sigma_2I[2] * V_2[0] + Sigma_2I[3] * V_2[1] );
second_term[1] = Sigma_1I[2] * ( R_0[0] * V_0[0] + R_0[1] * V_0[1] )
+ Sigma_1I[3] * ( R_0[2] * V_0[0] + R_0[3] * V_0[1] )
+ R_1T[2] * ( Sigma_2I[0] * V_2[0] + Sigma_2I[1] * V_2[1] )
+ R_1T[3] * ( Sigma_2I[2] * V_2[0] + Sigma_2I[3] * V_2[1] );
double v_1 = first_term[0] * second_term[0] + first_term[1] * second_term[1];
double phi_1 = first_term[2] * second_term[0] + first_term[3] * second_term[1];
// Rotate Coordinate From utv to xyz Coordinate System and Determine Which Voxel this Point on the MLP Path is in
double x_1 = ( cosf( xy_entry_angle ) * (u_in_object + u_1) ) - ( sinf( xy_entry_angle ) * t_1 );
double y_1 = ( sinf( xy_entry_angle ) * (u_in_object + u_1) ) + ( cosf( xy_entry_angle ) * t_1 );
double z_1 = v_in_object + v_1;
x_inside = modf( ( x_1 + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH;
y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_1 ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT;
z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_1 ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS;
x_voxel_step = (voxel_x >= voxel_x_previous ) - (voxel_x <= voxel_x_previous );
y_voxel_step = (voxel_y >= voxel_y_previous ) - (voxel_y <= voxel_y_previous );
z_voxel_step = (voxel_z >= voxel_z_previous ) - (voxel_z <= voxel_z_previous );
x_to_edge = (x_voxel_step < 0) * x_inside + (x_voxel_step > 0) * (VOXEL_WIDTH - x_inside);
y_to_edge = (y_voxel_step < 0) * y_inside + (y_voxel_step > 0) * (VOXEL_HEIGHT - y_inside);
z_to_edge = (z_voxel_step < 0) * z_inside + (z_voxel_step > 0) * (VOXEL_THICKNESS - z_inside);
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
if( voxel != path[path_index - 1] )
path[path_index++] = voxel;
for( int i = 0; i < path_index; i++ )
printf( "path[i] = %d\n", path[i] );
printf( "path_index = %d\n\n", path_index );
fgets(user_response, sizeof(user_response), stdin);
MLP_test_image_h[voxel] = 0;
voxels_passed = (voxel_x - voxel_x_previous) + (voxel_y - voxel_y_previous) + (voxel_z - voxel_z_previous);
chord_segment = sqrt( pow( x_1_previous - x_1, 2 ) + pow( y_1_previous - y_1, 2 ) + pow( z_1_previous - z_1, 2 ) );
if( voxels_passed == 0 )
{
chord_lengths[path_index - 1] += chord_segment;
}
else if( voxels_passed == 1 )
{
if( x_voxel_step != 0 )
{
chord_fraction = x_to_edge / (x_1_previous - x_1);
}
else if( y_voxel_step != 0 )
{
chord_fraction = y_to_edge / (y_1_previous - y_1);
}
else
{
chord_fraction = z_to_edge / (z_1_previous - z_1);
}
chord_lengths[path_index - 1] += chord_fraction * chord_segment;
chord_lengths[path_index] += chord_segment - chord_lengths[path_index - 1];
}
		else if( voxels_passed == 2 )
		{
			// Placeholder: apportioning the chord segment when two voxel boundaries are crossed is not implemented yet
		}
		else if( voxels_passed == 3 )
		{
			// Placeholder: apportioning the chord segment when three voxel boundaries are crossed is not implemented yet
		}
u_1 += MLP_u_step;
t_1_previous = t_1;
v_1_previous = v_1;
x_1_previous = x_1;
y_1_previous = y_1;
z_1_previous = z_1;
voxel_x_previous = voxel_x;
voxel_y_previous = voxel_y;
voxel_z_previous = voxel_z;
voxel_previous = voxel;
}
}
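// The 2x2 inversions above (Sigma_1I, Sigma_2I, and first_term) all follow the same row-major
// { a, b, c, d } -> (1/det){ d, -b, -c, a } pattern.  The helper below is a minimal, hypothetical sketch of
// that pattern for reference only; it is not called anywhere in the existing code and the name
// invert_2x2_sketch is not part of the original program.
inline void invert_2x2_sketch( const double A[4], double A_inverse[4] )
{
	double determinant = A[0] * A[3] - A[1] * A[2];		// ad - bc
	A_inverse[0] =  A[3] / determinant;
	A_inverse[1] = -A[1] / determinant;
	A_inverse[2] = -A[2] / determinant;
	A_inverse[3] =  A[0] / determinant;
}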
/************************************************************************************************************************************************************/
/************************************************************************ FBP *******************************************************************************/
/************************************************************************************************************************************************************/
void initialize_sinogram()
{
puts("Allocating host/GPU memory and initializing sinogram...");
sinogram_h = (float*) calloc( NUM_BINS, sizeof(float) );
hipMalloc((void**) &sinogram_d, MEM_SIZE_BINS_FLOATS );
hipMemcpy( sinogram_d, sinogram_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice );
}
void construct_sinogram()
{
puts("Recalculating the mean WEPL for each bin and constructing the sinogram...");
dim3 dimBlock( T_BINS );
dim3 dimGrid( V_BINS, ANGULAR_BINS );
hipLaunchKernelGGL(( construct_sinogram_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0, bin_counts_d, sinogram_d );
//hipMemcpy(sinogram_h, sinogram_d, MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost);
//write_array_to_disk("sinogram", output_directory, output_folder, sinogram_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, false );
//bin_counts_h = (int*) calloc( NUM_BINS, sizeof(int) );
//hipMemcpy(bin_counts_h, bin_counts_d, MEM_SIZE_BINS_INTS, hipMemcpyDeviceToHost) ;
//write_array_to_disk( "bin_counts_post", output_directory, output_folder, bin_counts_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
}
__global__ void construct_sinogram_GPU( int* bin_counts, float* sinogram )
{
int v = blockIdx.x, angle = blockIdx.y, t = threadIdx.x;
int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS;
if( bin_counts[bin] > 0 )
sinogram[bin] /= bin_counts[bin];
}
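// Note: bin_counts and the per-bin WEPL sums are accumulated during binning, outside this section; the kernel
// above simply divides each bin's accumulated WEPL by its history count to form the mean WEPL per bin, which is
// what the FBP filtering and backprojection below operate on.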
void filter()
{
puts("Doing the filtering...");
sinogram_filtered_h = (float*) calloc( NUM_BINS, sizeof(float) );
hipMalloc((void**) &sinogram_filtered_d, MEM_SIZE_BINS_FLOATS);
hipMemcpy( sinogram_filtered_d, sinogram_filtered_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice);
dim3 dimBlock( T_BINS );
dim3 dimGrid( V_BINS, ANGULAR_BINS );
hipLaunchKernelGGL(( filter_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0, sinogram_d, sinogram_filtered_d );
hipMemcpy(sinogram_filtered_h, sinogram_filtered_d, MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost) ;
free(sinogram_h);
hipFree(sinogram_d);
hipFree(sinogram_filtered_d);
}
__global__ void filter_GPU( float* sinogram, float* sinogram_filtered )
{
int t_bin_ref,angle_bin,t_bin,v_bin,t_bin_sep;
float filtered,t,v,scale_factor;
v_bin = blockIdx.x;
angle_bin = blockIdx.y;
t_bin = threadIdx.x;
v = ( v_bin - V_BINS/2 ) * V_BIN_SIZE + V_BIN_SIZE/2.0;
	// Sum the filtered contribution of every detector strip (t_bin_ref) to this strip (t_bin)
for( t_bin_ref = 0; t_bin_ref < T_BINS; t_bin_ref++ )
{
t = ( t_bin_ref - T_BINS/2 ) * T_BIN_SIZE + T_BIN_SIZE/2.0;
t_bin_sep = t_bin - t_bin_ref;
// scale_factor = r . path = cos(theta_{r,path})
scale_factor = SOURCE_RADIUS / sqrtf( SOURCE_RADIUS * SOURCE_RADIUS + t * t + v * v );
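		// FILTER_NUM selects the discrete ramp filter applied below:
		//   Ram-Lak:      h[0] = 1/(8*T^2),  h[k] = 0 for even k != 0,  h[k] = -1/(2*(PI*T*k)^2) for odd k
		//   Shepp-Logan:  h[k] = 1/( (PI*T)^2 * (1 - 4*k^2) )
		// where k = t_bin_sep and T = T_BIN_SIZE.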
		switch( FILTER_NUM )
		{
			case 0: // Ram-Lak filter
				if( t_bin_sep == 0 )
					filtered = 1.0 / ( 8.0 * powf( T_BIN_SIZE, 2.0 ) );
				else if( t_bin_sep % 2 == 0 )
					filtered = 0;
				else
					filtered = -1.0 / ( 2.0 * powf( T_BIN_SIZE * PI * t_bin_sep, 2.0 ) );
				break;
			case 1: // Shepp-Logan filter
				filtered = powf( powf(T_BIN_SIZE * PI, 2.0) * ( 1.0 - powf(2 * t_bin_sep, 2.0) ), -1.0 );
				break;
		}
int strip_index = ( v_bin * ANGULAR_BINS * T_BINS ) + ( angle_bin * T_BINS );
sinogram_filtered[strip_index + t_bin] += T_BIN_SIZE * sinogram[strip_index + t_bin_ref] * filtered * scale_factor;
}
}
void backprojection()
{
puts("Doing the backprojection...");
printf("DEBUG: MEM_SIZE_IMAGE_FLOAT = %u\n", MEM_SIZE_IMAGE_FLOAT);
// Allocate host memory
puts("DEBUG: Allocate host memory");
char user_response[20];
X_h = (float*) calloc( VOXELS, sizeof(float) );
if( X_h == NULL )
{
printf("ERROR: Memory not allocated for X_h!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
// Check that we don't have any corruptions up until now
for( int i = 0; i < NUM_BINS; i++ )
if( sinogram_filtered_h[i] != sinogram_filtered_h[i] )
printf("We have a nan in bin #%d\n", i);
float delta = GANTRY_ANGLE_INTERVAL * ANGLE_TO_RADIANS;
// Loop over the voxels
for( int slice = 0; slice < SLICES; slice++ )
{
for( int column = 0; column < COLUMNS; column++ )
{
for( int row = 0; row < ROWS; row++ )
{
float x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH;
float y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT;
float z = -RECON_CYL_HEIGHT / 2.0 + (slice + 0.5) * SLICE_THICKNESS;
//// If the voxel is outside a cylinder contained in the reconstruction volume, set to air
if( ( x * x + y * y ) > ( RECON_CYL_RADIUS * RECON_CYL_RADIUS ) )
X_h[( slice * COLUMNS * ROWS) + ( row * COLUMNS ) + column] = 0.00113;
else
{
// Sum over projection angles
for( int angle_bin = 0; angle_bin < ANGULAR_BINS; angle_bin++ )
{
// Rotate the pixel position to the beam-detector co-ordinate system
float u = x * cosf( angle_bin * delta ) + y * sinf( angle_bin * delta );
float t = -x * sinf( angle_bin * delta ) + y * cosf( angle_bin * delta );
float v = z;
// Project to find the detector number
float detector_number_t = ( t - u *( t / ( SOURCE_RADIUS + u ) ) ) / T_BIN_SIZE + T_BINS/2.0;
int t_bin = int( detector_number_t);
if( t_bin > detector_number_t )
t_bin -= 1;
float eta = detector_number_t - t_bin;
// Now project v to get detector number in v axis
float detector_number_v = ( v - u * ( v / ( SOURCE_RADIUS + u ) ) ) / V_BIN_SIZE + V_BINS/2.0;
int v_bin = int( detector_number_v);
if( v_bin > detector_number_v )
v_bin -= 1;
float epsilon = detector_number_v - v_bin;
// Calculate the fan beam scaling factor
float scale_factor = powf( SOURCE_RADIUS / ( SOURCE_RADIUS + u ), 2 );
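						// eta and epsilon are the fractional distances of the projected point past the lower t and v bin
						// edges, so the four sinogram_filtered_h terms below form a bilinear interpolation between the
						// four surrounding bins; scale_factor applies the usual fan/cone-beam distance weighting
						// ( SOURCE_RADIUS / (SOURCE_RADIUS + u) )^2.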
//bin_num[i] = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS;
// Compute the back-projection
int bin = t_bin + angle_bin * T_BINS + v_bin * ANGULAR_BINS * T_BINS;
int voxel = slice * COLUMNS * ROWS + row * COLUMNS + column;
						// The angular-bin stride is computed ahead of time because the expression reportedly would not compile when written directly inside the []s
int index = ANGULAR_BINS * T_BINS;
//if( ( ( bin + ANGULAR_BINS * T_BINS + 1 ) >= NUM_BINS ) || ( bin < 0 ) );
if( v_bin == V_BINS - 1 || ( bin < 0 ) )
{
X_h[voxel] += delta * 2 *( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin]
+ eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]) * scale_factor;
}
//printf("The bin selected for this voxel does not exist!\n Slice: %d\n Column: %d\n Row: %d\n", slice, column, row);
else
{
// not sure why this won't compile without calculating the index ahead of time instead inside []s
/*X_h[voxel] += delta * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin]
+ eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]
+ ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index]
+ eta * epsilon * sinogram_filtered_h[bin + index + 1] ) * scale_factor;*/
X_h[voxel] += delta * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin]
+ eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]
+ ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index]
+ eta * epsilon * sinogram_filtered_h[bin + index + 1] ) * scale_factor;
							// Multiplying by the gantry angle interval for each gantry angle is equivalent to multiplying the final answer by 2*PI and is better numerically,
							// so the per-angle multiplication by delta could be replaced by a single X_h[voxel] *= 2 * PI after all contributions have been made (commented out below)
/*X_h[voxel] += scale_factor * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin]
+ eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]
+ ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index]
+ eta * epsilon * sinogram_filtered_h[bin + index + 1] );*/
if(X_h[voxel]!=X_h[voxel])
printf("We have a nan in slice %d, column %d, and row %d\n", slice, column, row);
}
//X_h[voxel] *= 2 * PI;
}
}
}
}
}
free(sinogram_filtered_h);
FBP_object_h = (int*) calloc( COLUMNS * ROWS * SLICES, sizeof(int) );
for( int slice = 0; slice < SLICES; slice++ )
{
for( int row = 0; row < ROWS; row++ )
{
for( int column = 0; column < COLUMNS; column++ )
{
float x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH;
float y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT;
float d_squared = powf(x, 2) + powf(y, 2);
if(X_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] > FBP_THRESHOLD && (d_squared < powf(RECON_CYL_RADIUS, 2) ) )
FBP_object_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] = 1;
else
FBP_object_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] = 0;
}
}
}
//write_array_to_disk( "FBP_object", output_directory, output_folder, FBP_object_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk( "X_h", output_directory, output_folder, X_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk( "x_FBP", output_directory, output_folder, FBP_object_h, COLUMNS, ROWS, SLICES, VOXELS, true );
}
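// The FBP stage above is presumably driven in the order initialize_sinogram() -> (binning, performed elsewhere)
// -> construct_sinogram() -> filter() -> backprojection(); the actual call sequence lives outside this section,
// so this ordering is inferred from the data each function consumes and frees.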
/************************************************************************************************************************************************************/
/****************************************************************** Image Initialization *******************************************************************/
/************************************************************************************************************************************************************/
void initialize_SC_hull( bool*& SC_hull_h, bool*& SC_hull_d )
{
/* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */
/* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */
/* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. Assign */
/* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */
// Allocate memory for the hull image on the host and initialize to zeros
SC_hull_h = (bool*)calloc( VOXELS, sizeof(bool));
float x, y;
// Set the inner cylinder of the hull image to 1s
for( int slice = 0; slice < SLICES; slice++ )
for( int row = 0; row < ROWS; row++ )
for( int column = 0; column < COLUMNS; column++ )
{
x = ( column - COLUMNS/2 + 0.5) * VOXEL_WIDTH;
y = ( ROWS/2 - row - 0.5) * VOXEL_HEIGHT;
if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) )
SC_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = true;
}
// Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU
hipMalloc((void**) &SC_hull_d, MEM_SIZE_IMAGE_BOOL);
hipMemcpy(SC_hull_d, SC_hull_h, MEM_SIZE_IMAGE_BOOL, hipMemcpyHostToDevice) ;
}
void initialize_MSC_hull( int*& MSC_hull_h, int*& MSC_hull_d )
{
/* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */
/* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */
/* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. Assign */
/* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */
// Allocate memory for the hull image on the host and initialize to zeros
MSC_hull_h = (int*)calloc( VOXELS, sizeof(int));
float x, y;
// Set the inner cylinder of the hull image to 1s
for( int slice = 0; slice < SLICES; slice++ )
for( int row = 0; row < ROWS; row++ )
for( int column = 0; column < COLUMNS; column++ )
{
x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) )
MSC_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1;
}
// Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU
hipMalloc((void**) &MSC_hull_d, MEM_SIZE_IMAGE_INT);
hipMemcpy(MSC_hull_d, MSC_hull_h, MEM_SIZE_IMAGE_INT, hipMemcpyHostToDevice) ;
}
void initialize_SM_hull( int*& SM_hull_h, int*& SM_hull_d )
{
/* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */
/* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */
/* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. Assign */
/* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */
// Allocate memory for the hull image on the host and initialize to zeros
SM_hull_h = (int*)calloc( VOXELS, sizeof(int));
float x, y;
// Set the inner cylinder of the hull image to 1s
for( int slice = 0; slice < SLICES; slice++ )
for( int row = 0; row < ROWS; row++ )
for( int column = 0; column < COLUMNS; column++ )
{
x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) )
SM_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1;
}
// Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU
hipMalloc((void**) &SM_hull_d, MEM_SIZE_IMAGE_INT);
hipMemcpy(SM_hull_d, SM_hull_h, MEM_SIZE_IMAGE_INT, hipMemcpyHostToDevice) ;
}
void initialize_float_image( float*& float_image_h, float*& float_image_d )
{
//Create space carve object, init to zeros
float_image_h = (float*)calloc( VOXELS, sizeof(float));
double x, y;
// Set inner cylinder to 1s
for( int slice = 0; slice < SLICES; slice++ )
for( int row = 0; row < ROWS; row++ )
for( int column = 0; column < COLUMNS; column++ )
{
x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
if( ( (x * x) + (y * y) ) < double(RECON_CYL_RADIUS * RECON_CYL_RADIUS) )
float_image_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1;
}
hipMalloc((void**) &float_image_d, MEM_SIZE_IMAGE_FLOAT);
hipMemcpy(float_image_d, float_image_h, MEM_SIZE_IMAGE_FLOAT, hipMemcpyHostToDevice) ;
}
/************************************************************************************************************************************************************/
/******************************************************************* Hull Detection *************************************************************************/
/************************************************************************************************************************************************************/
void hull_detection_initializations()
{
if( SC_ON || MSC_ON || SM_ON )
puts("Initializing hull-detection images...");
if( SC_ON )
initialize_SC_hull( SC_image_h, SC_image_d );
if( MSC_ON )
initialize_MSC_hull( MSC_image_h, MSC_image_d );
if( SM_ON )
initialize_SM_hull( SM_image_h, SM_image_d );
}
void hull_detection( int histories_to_process)
{
if( SC_ON && (!bad_data_angle( gantry_angle_h[0] ) || !RESTRICTED_ANGLES ) )
SC( histories_to_process );
if( MSC_ON )
MSC( histories_to_process );
if( SM_ON )
SM( histories_to_process );
}
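// Overview of the three hull-detection variants implemented below (as inferred from the kernels):
//   SC  (space carving)        - for each proton with WEPL <= SC_THRESHOLD, every voxel on its straight-line path
//                                is set to 0, carving the object hull out of the initialized cylinder.
//   MSC (modified space carve) - the same walk, but each traversed voxel is atomically incremented; voxels are
//                                kept or discarded later in MSC_threshold() based on neighborhood count differences.
//   SM  (space modeling)       - counts traversals of protons with WEPL >= SM_LOWER_THRESHOLD and thresholds the
//                                counts per slice in SM_threshold().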
__device__ void voxel_walk( bool*& image, float x_entry, float y_entry, float z_entry, float x_exit, float y_exit, float z_exit )
{
{
/********************************************************************************************/
/********************************* Voxel Walk Parameters ************************************/
/********************************************************************************************/
int x_move_direction, y_move_direction, z_move_direction;
int x_voxel_step, y_voxel_step, z_voxel_step;
float delta_x, delta_y, delta_z;
float x_move, y_move, z_move;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
float x, y, z;
float x_inside, y_inside, z_inside;
float x_to_go, y_to_go, z_to_go;
float x_extension, y_extension;
float voxel_x, voxel_y, voxel_z;
float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out;
int voxel;
bool outside_image, end_walk;
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_entry + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
y_inside = modf( ( RECON_CYL_RADIUS - y_entry ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
voxel_x_out = int( ( x_exit + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit ) /VOXEL_HEIGHT );
voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit ) /VOXEL_THICKNESS );
voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS);
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = VOXEL_WIDTH;
delta_y = abs( (y_exit - y_entry)/(x_exit - x_entry) * VOXEL_WIDTH );
delta_z = abs( (z_exit - z_entry)/(x_exit - x_entry) * VOXEL_WIDTH );
	// Overwrite the NaN/Inf deltas produced above when their divisors (the coordinate differences) are zero
if( x_entry == x_exit )
{
delta_x = abs( (x_exit - x_entry)/(y_exit - y_entry) * VOXEL_HEIGHT );
delta_y = VOXEL_HEIGHT;
delta_z = abs( (z_exit - z_entry)/(y_exit - y_entry) * VOXEL_HEIGHT );
if( y_entry == y_exit )
{
delta_x = abs( (x_exit - x_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );
			delta_y = abs( (y_exit - y_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );
delta_z = VOXEL_THICKNESS;
}
}
x_move = 0, y_move = 0, z_move = 0;
x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit );
y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit );
z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit );
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_entry, y = y_entry, z = z_entry;
x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside;
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
image[voxel] = 0;
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
/********************************************************************************************/
/*********************************** Voxel Walk Routine *************************************/
/********************************************************************************************/
if( z_entry != z_exit )
{
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
//fgets(user_response, sizeof(user_response), stdin);
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
image[voxel] = 0;
end_walk = ( voxel == voxel_out ) || outside_image;
}
}
else
{
//printf("z_exit == z_entry\n");
while( !end_walk )
{
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
image[voxel] = 0;
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
	}//end: else: z_entry != z_exit => z_entry == z_exit
}
void SC( int num_histories )
{
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
hipLaunchKernelGGL(( SC_GPU), dim3(dimGrid), dim3(dimBlock), 0, 0,
num_histories, SC_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d
);
}
__global__ void SC_GPU
(
int num_histories, bool* SC_image, int* bin_num, bool* traversed_recon_volume, float* WEPL,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= SC_THRESHOLD) && (bin_num[i] >= 0) )
{
voxel_walk( SC_image, x_entry[i], y_entry[i], z_entry[i], x_exit[i], y_exit[i], z_exit[i] );
	}// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= SC_THRESHOLD) && (bin_num[i] >= 0) )
}
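// Note: MSC_GPU and SM_GPU below repeat essentially the same Siddon-style voxel walk that voxel_walk() performs
// for SC_GPU, differing only in the per-voxel action (an atomicAdd tally instead of clearing the voxel).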
/************************************************************************************************************************************************************/
void MSC( int num_histories )
{
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
hipLaunchKernelGGL(( MSC_GPU), dim3(dimGrid), dim3(dimBlock), 0, 0,
num_histories, MSC_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d
);
}
__global__ void MSC_GPU
(
int num_histories, int* MSC_image, int* bin_num, bool* traversed_recon_volume, float* WEPL,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] < MSC_THRESHOLD) && (bin_num[i] >= 0) )
{
//char user_response[20];
/********************************************************************************************/
/********************************* Voxel Walk Parameters ************************************/
/********************************************************************************************/
int x_move_direction, y_move_direction, z_move_direction;
int x_voxel_step, y_voxel_step, z_voxel_step;
float delta_x, delta_y, delta_z;
float x_move, y_move, z_move;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
float x, y, z;
float x_inside, y_inside, z_inside;
float x_to_go, y_to_go, z_to_go;
float x_extension, y_extension;
float voxel_x, voxel_y, voxel_z;
float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out;
int voxel;
bool outside_image, end_walk;
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i]) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i]) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT );
voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS );
voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS);
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = VOXEL_WIDTH;
delta_y = abs( (y_exit[i] - y_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH );
delta_z = abs( (z_exit[i] - z_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH );
		// Overwrite the NaN/Inf deltas produced above when their divisors (the coordinate differences) are zero
if( x_entry[i] == x_exit[i] )
{
delta_x = abs( (x_exit[i] - x_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT );
delta_y = VOXEL_HEIGHT;
delta_z = abs( (z_exit[i] - z_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT );
if( y_entry[i] == y_exit[i] )
{
delta_x = abs( (x_exit[i] - x_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );
				delta_y = abs( (y_exit[i] - y_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );
delta_z = VOXEL_THICKNESS;
}
}
x_move = 0, y_move = 0, z_move = 0;
x_move_direction = ( x_entry[i] <= x_exit[i] ) - ( x_entry[i] > x_exit[i] );
y_move_direction = ( y_entry[i] <= y_exit[i] ) - ( y_entry[i] > y_exit[i] );
z_move_direction = ( z_entry[i] <= z_exit[i] ) - ( z_entry[i] > z_exit[i] );
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_entry[i], y = y_entry[i], z = z_entry[i];
x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside;
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &MSC_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
/********************************************************************************************/
/*********************************** Voxel Walk Routine *************************************/
/********************************************************************************************/
if( z_entry[i] != z_exit[i] )
{
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
//fgets(user_response, sizeof(user_response), stdin);
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &MSC_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
}
}
else
{
//printf("z_exit[i] == z_entry[i]\n");
while( !end_walk )
{
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &MSC_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
}//end: else: z_entry[i] != z_exit[i] => z_entry[i] == z_exit[i]
	}// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] < MSC_THRESHOLD) && (bin_num[i] >= 0) )
}
void MSC_threshold()
{
hipMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost);
write_array_to_disk("MSC_image", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
dim3 dimBlock( SLICES );
dim3 dimGrid( COLUMNS, ROWS );
hipLaunchKernelGGL(( MSC_threshold_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0, MSC_image_d );
hipMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost);
write_array_to_disk("MSC_image_thresholded", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk("x_MSC", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
hipFree( MSC_image_d );
free(MSC_image_h);
}
__global__ void MSC_threshold_GPU( int* MSC_image )
{
int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
int difference, max_difference = 0;
if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) )
{
for( int current_row = row - 1; current_row <= row + 1; current_row++ )
{
for( int current_column = column - 1; current_column <= column + 1; current_column++ )
{
difference = MSC_image[voxel] - MSC_image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS];
if( difference > max_difference )
max_difference = difference;
}
}
}
	__syncthreads();
if( max_difference > MSC_DIFF_THRESH )
MSC_image[voxel] = 0;
else if( MSC_image[voxel] == 0 )
MSC_image[voxel] = 0;
else
MSC_image[voxel] = 1;
	if( powf(x, 2) + powf(y, 2) >= powf(RECON_CYL_RADIUS - max(VOXEL_WIDTH, VOXEL_HEIGHT)/2, 2 ) )
MSC_image[voxel] = 0;
}
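// In MSC_threshold_GPU above, a voxel survives (is set to 1) only if it was traversed at least once and its count
// does not exceed any of its 8 in-slice neighbors' counts by more than MSC_DIFF_THRESH; voxels at or beyond the
// edge of the reconstruction cylinder are always cleared.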
/************************************************************************************************************************************************************/
void SM( int num_histories)
{
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
hipLaunchKernelGGL(( SM_GPU), dim3(dimGrid), dim3(dimBlock), 0, 0,
num_histories, SM_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d
);
}
__global__ void SM_GPU
(
int num_histories, int* SM_image, int* bin_num, bool* traversed_recon_volume, float* WEPL,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] >= SM_LOWER_THRESHOLD) && (bin_num[i] >= 0) )
{
//char user_response[20];
/********************************************************************************************/
/********************************* Voxel Walk Parameters ************************************/
/********************************************************************************************/
int x_move_direction, y_move_direction, z_move_direction;
int x_voxel_step, y_voxel_step, z_voxel_step;
float delta_x, delta_y, delta_z;
float x_move, y_move, z_move;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
float x, y, z;
float x_inside, y_inside, z_inside;
float x_to_go, y_to_go, z_to_go;
float x_extension, y_extension;
float voxel_x, voxel_y, voxel_z;
float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out;
int voxel;
bool outside_image, end_walk;
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i] ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i] ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT );
voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS );
voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS);
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = VOXEL_WIDTH;
delta_y = abs( (y_exit[i] - y_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH );
delta_z = abs( (z_exit[i] - z_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH );
		// Overwrite the NaN/Inf deltas produced above when their divisors (the coordinate differences) are zero
if( x_entry[i] == x_exit[i] )
{
delta_x = abs( (x_exit[i] - x_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT );
delta_y = VOXEL_HEIGHT;
delta_z = abs( (z_exit[i] - z_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT );
if( y_entry[i] == y_exit[i] )
{
delta_x = abs( (x_exit[i] - x_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );
				delta_y = abs( (y_exit[i] - y_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );
delta_z = VOXEL_THICKNESS;
}
}
x_move = 0, y_move = 0, z_move = 0;
x_move_direction = ( x_entry[i] <= x_exit[i] ) - ( x_entry[i] > x_exit[i] );
y_move_direction = ( y_entry[i] <= y_exit[i] ) - ( y_entry[i] > y_exit[i] );
z_move_direction = ( z_entry[i] <= z_exit[i] ) - ( z_entry[i] > z_exit[i] );
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_entry[i], y = y_entry[i], z = z_entry[i];
x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside;
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &SM_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
/********************************************************************************************/
/*********************************** Voxel Walk Routine *************************************/
/********************************************************************************************/
if( z_entry[i] != z_exit[i] )
{
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
//fgets(user_response, sizeof(user_response), stdin);
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &SM_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
}
}
else
{
//printf("z_exit[i] == z_entry[i]\n");
while( !end_walk )
{
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &SM_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
}//end: else: z_entry[i] != z_exit[i] => z_entry[i] == z_exit[i]
	}// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] >= SM_LOWER_THRESHOLD) && (bin_num[i] >= 0) )
}
void SM_threshold()
{
// Copy the space modeled image from the GPU to the CPU and write it to file.
hipMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost);
write_array_to_disk("SM_image", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
int* SM_differences_h = (int*) calloc( VOXELS, sizeof(int) );
int* SM_differences_d;
hipMalloc((void**) &SM_differences_d, MEM_SIZE_IMAGE_INT );
hipMemcpy( SM_differences_d, SM_differences_h, MEM_SIZE_IMAGE_INT, hipMemcpyHostToDevice );
dim3 dimBlock( SLICES );
dim3 dimGrid( COLUMNS, ROWS );
hipLaunchKernelGGL(( carve_differences), dim3(dimGrid), dim3(dimBlock) , 0, 0, SM_differences_d, SM_image_d );
hipMemcpy( SM_differences_h, SM_differences_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost );
int* SM_thresholds_h = (int*) calloc( SLICES, sizeof(int) );
int voxel;
int max_difference = 0;
for( int slice = 0; slice < SLICES; slice++ )
{
for( int pixel = 0; pixel < COLUMNS * ROWS; pixel++ )
{
voxel = pixel + slice * COLUMNS * ROWS;
if( SM_differences_h[voxel] > max_difference )
{
max_difference = SM_differences_h[voxel];
SM_thresholds_h[slice] = SM_image_h[voxel];
}
}
printf( "Slice %d : The maximum space_model difference = %d and the space_model threshold = %d\n", slice, max_difference, SM_thresholds_h[slice] );
max_difference = 0;
}
int* SM_thresholds_d;
unsigned int threshold_size = SLICES * sizeof(int);
hipMalloc((void**) &SM_thresholds_d, threshold_size );
hipMemcpy( SM_thresholds_d, SM_thresholds_h, threshold_size, hipMemcpyHostToDevice );
hipLaunchKernelGGL(( SM_threshold_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0, SM_image_d, SM_thresholds_d);
hipMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost);
//write_array_to_disk("space_model_thresholded", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk("x_SM", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
hipFree( SM_differences_d );
hipFree( SM_thresholds_d );
hipFree( SM_image_d );
free(SM_differences_h);
free(SM_thresholds_h);
free(SM_image_h);
}
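// Binarize the SM image on the GPU: a voxel is set to 1 if its count exceeds SM_THRESHOLD_MULTIPLIER times its slice's threshold,
// and voxels outside the reconstruction cylinder are forced to 0.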
__global__ void SM_threshold_GPU( int* SM_image, int* SM_threshold )
{
int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
if( voxel < VOXELS )
{
if( SM_image[voxel] > SM_THRESHOLD_MULTIPLIER * SM_threshold[slice] )
SM_image[voxel] = 1;
else
SM_image[voxel] = 0;
if( powf(x, 2) + powf(y, 2) >= powf(RECON_CYL_RADIUS - max(VOXEL_WIDTH, VOXEL_HEIGHT)/2, 2 ) )
SM_image[voxel] = 0;
}
}
void SM_threshold_2()
{
// Copy the space modeled image from the GPU to the CPU and write it to file.
hipMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost);
write_array_to_disk("SM_image", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
int* SM_differences_h = (int*) calloc( VOXELS, sizeof(int) );
int* SM_differences_d;
hipMalloc((void**) &SM_differences_d, MEM_SIZE_IMAGE_INT );
hipMemcpy( SM_differences_d, SM_differences_h, MEM_SIZE_IMAGE_INT, hipMemcpyHostToDevice );
dim3 dimBlock( SLICES );
dim3 dimGrid( COLUMNS, ROWS );
hipLaunchKernelGGL(( carve_differences), dim3(dimGrid), dim3(dimBlock) , 0, 0, SM_differences_d, SM_image_d );
hipMemcpy( SM_differences_h, SM_differences_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost );
int* SM_thresholds_h = (int*) calloc( SLICES, sizeof(int) );
int voxel;
int max_difference = 0;
for( int slice = 0; slice < SLICES; slice++ )
{
for( int pixel = 0; pixel < COLUMNS * ROWS; pixel++ )
{
voxel = pixel + slice * COLUMNS * ROWS;
if( SM_differences_h[voxel] > max_difference )
{
max_difference = SM_differences_h[voxel];
SM_thresholds_h[slice] = SM_image_h[voxel];
}
}
printf( "Slice %d : The maximum space_model difference = %d and the space_model threshold = %d\n", slice, max_difference, SM_thresholds_h[slice] );
max_difference = 0;
}
int* SM_thresholds_d;
unsigned int threshold_size = SLICES * sizeof(int);
hipMalloc((void**) &SM_thresholds_d, threshold_size );
hipMemcpy( SM_thresholds_d, SM_thresholds_h, threshold_size, hipMemcpyHostToDevice );
hipLaunchKernelGGL(( SM_threshold_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0, SM_image_d, SM_thresholds_d);
hipMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost);
//write_array_to_disk("space_model_thresholded", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk("x_SM", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
hipFree( SM_differences_d );
hipFree( SM_thresholds_d );
hipFree( SM_image_d );
free(SM_differences_h);
free(SM_thresholds_h);
free(SM_image_h);
}
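// Alternate single-kernel version of SM thresholding: each thread computes its voxel's maximum in-slice neighbor difference, rescans its
// entire slice to find the slice threshold, and then binarizes its voxel. Appears to be an experimental variant; SM_threshold() launches
// SM_threshold_GPU rather than this kernel.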
__global__ void SM_threshold_GPU_2( int* SM_image, int* SM_differences )
{
int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
int difference, max_difference = 0;
if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) )
{
for( int current_row = row - 1; current_row <= row + 1; current_row++ )
{
for( int current_column = column - 1; current_column <= column + 1; current_column++ )
{
difference = SM_image[voxel] - SM_image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS];
if( difference > max_difference )
max_difference = difference;
}
}
SM_differences[voxel] = max_difference;
}
__syncthreads();
int slice_threshold = 0;
max_difference = 0;
for( int pixel = 0; pixel < COLUMNS * ROWS; pixel++ )
{
voxel = pixel + slice * COLUMNS * ROWS;
if( SM_differences[voxel] > max_difference )
{
max_difference = SM_differences[voxel];
slice_threshold = SM_image[voxel];
}
}
__syncthreads();
float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
if( voxel < VOXELS )
{
if( SM_image[voxel] > SM_THRESHOLD_MULTIPLIER * slice_threshold )
SM_image[voxel] = 1;
else
SM_image[voxel] = 0;
if( powf(x, 2) + powf(y, 2) >= powf(RECON_CYL_RADIUS - max(VOXEL_WIDTH, VOXEL_HEIGHT)/2, 2 ) )
SM_image[voxel] = 0;
}
}
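// Finish hull detection: write the SC hull (already binary) directly to disk and run the MSC/SM thresholding routines for whichever
// hull-detection methods are enabled.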
void hull_detection_finish()
{
if( SC_ON || MSC_ON || SM_ON )
puts("Performing Hull Thresholding and Writing Hull Images to Disk...");
if( SC_ON )
{
hipMemcpy(SC_image_h, SC_image_d, MEM_SIZE_IMAGE_BOOL, hipMemcpyDeviceToHost);
write_array_to_disk("x_sc", output_directory, output_folder, SC_image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
}
if( MSC_ON )
MSC_threshold();
if( SM_ON )
SM_threshold();
if( SC_ON || MSC_ON || SM_ON )
puts("Hull-Detection Complete.");
}
/************************************************************************************************************************************************************/
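// For each interior voxel of a slice, store the maximum difference between its value and the values of its in-slice neighbors;
// voxels on the first/last row or column are skipped.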
__global__ void carve_differences( int* carve_differences, int* image )
{
int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) )
{
int difference, max_difference = 0;
for( int current_row = row - 1; current_row <= row + 1; current_row++ )
{
for( int current_column = column - 1; current_column <= column + 1; current_column++ )
{
difference = image[voxel] - image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS];
if( difference > max_difference )
max_difference = difference;
}
}
carve_differences[voxel] = max_difference;
}
}
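// Host wrapper for the boolean averaging filter: initializes the image buffers via initialize_SC_hull, launches averaging_filter_GPU with a
// zero threshold, copies the filtered image back to the host, and writes it to disk as "test".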
void averaging_filter( bool*& image_h, bool*& image_d, const int filter_size )
{
initialize_SC_hull(image_h, image_d);
float threshold = 0;
dim3 dimBlock( SLICES );
dim3 dimGrid( COLUMNS, ROWS );
hipLaunchKernelGGL(( averaging_filter_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0, image_d, filter_size, threshold);
hipMemcpy(image_h, image_d, MEM_SIZE_IMAGE_BOOL, hipMemcpyDeviceToHost);
write_array_to_disk( "test", output_directory, output_folder, image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
}
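// Sum the voxel's value together with its (filter_size x filter_size) in-slice neighborhood (interior voxels only) and set the voxel to
// true when the sum exceeds the threshold. Note that the center voxel is counted twice, since the sum is seeded with image[voxel].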
__global__ void averaging_filter_GPU( bool* image, const int filter_size, const float threshold )
{
int voxel_x = blockIdx.x;
int voxel_y = blockIdx.y;
int voxel_z = threadIdx.x;
int voxel = voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS;
int sum = image[voxel];
if( (voxel_x > 0) && (voxel_y > 0) && (voxel_x < COLUMNS - 1) && (voxel_y < ROWS - 1) )
{
for( int i = voxel_x - filter_size/2; i <= voxel_x + filter_size/2; i++ )
for( int j = voxel_y - filter_size/2; j <= voxel_y + filter_size/2; j++ )
sum += image[i + j * COLUMNS + voxel_z * COLUMNS * ROWS];
}
//value[voxel] = sum > threshold;
__syncthreads();
image[voxel] = sum > threshold;
}
/************************************************************************************************************************************************************/
/******************************************************** Memory Transfers, Maintenance, and Cleaning *******************************************************/
/************************************************************************************************************************************************************/
void initial_processing_memory_clean()
{
free( gantry_angle_h );
hipFree( x_entry_d );
hipFree( y_entry_d );
hipFree( z_entry_d );
hipFree( x_exit_d );
hipFree( y_exit_d );
hipFree( z_exit_d );
hipFree( traversed_recon_volume_d );
hipFree( bin_num_d );
hipFree( WEPL_d);
}
void post_cut_memory_clean()
{
puts("Freeing unnecessary memory, resizing vectors and shrinking vectors to just fit remaining histories...");
free(passed_cuts_h );
free(stddev_rel_ut_angle_h);
free(stddev_rel_uv_angle_h);
free(stddev_WEPL_h);
hipFree( passed_cuts_d );
hipFree( bin_num_d );
hipFree( WEPL_d );
hipFree( xy_entry_angle_d );
hipFree( xz_entry_angle_d );
//hipFree( xy_exit_angle_d );
//hipFree( xz_exit_angle_d );
hipFree( relative_ut_angle_d );
hipFree( relative_uv_angle_d );
hipFree( mean_rel_ut_angle_d );
hipFree( mean_rel_uv_angle_d );
hipFree( mean_WEPL_d );
hipFree( stddev_rel_ut_angle_d );
hipFree( stddev_rel_uv_angle_d );
hipFree( stddev_WEPL_d );
}
void resize_vectors( const int new_size )
{
bin_num_vector.resize( new_size );
//gantry_angle_vector.resize( new_size );
WEPL_vector.resize( new_size );
x_entry_vector.resize( new_size );
y_entry_vector.resize( new_size );
z_entry_vector.resize( new_size );
x_exit_vector.resize( new_size );
y_exit_vector.resize( new_size );
z_exit_vector.resize( new_size );
xy_entry_angle_vector.resize( new_size );
xz_entry_angle_vector.resize( new_size );
//xy_exit_angle_vector.resize( new_size );
//xz_exit_angle_vector.resize( new_size );
relative_ut_angle_vector.resize( new_size );
relative_uv_angle_vector.resize( new_size );
}
void shrink_vectors( const int new_capacity )
{
bin_num_vector.shrink_to_fit();
//gantry_angle_vector.shrink_to_fit();
WEPL_vector.shrink_to_fit();
x_entry_vector.shrink_to_fit();
y_entry_vector.shrink_to_fit();
z_entry_vector.shrink_to_fit();
x_exit_vector.shrink_to_fit();
y_exit_vector.shrink_to_fit();
z_exit_vector.shrink_to_fit();
xy_entry_angle_vector.shrink_to_fit();
xz_entry_angle_vector.shrink_to_fit();
//xy_exit_angle_vector.shrink_to_fit();
//xz_exit_angle_vector.shrink_to_fit();
relative_ut_angle_vector.shrink_to_fit();
relative_uv_angle_vector.shrink_to_fit();
}
/************************************************************************************************************************************************************/
/****************************************************** Routines for Writing Data Arrays/Vectors to Disk ****************************************************/
/************************************************************************************************************************************************************/
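// Write a 3D array to text file(s): one file per slice named "<filename_base>_<slice>.txt", or a single file "<filename_base>.txt" containing
// all slices when single_file is true. Values are written space separated, row by row, stopping once 'elements' values have been written.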
template<typename T> void write_array_to_disk( char* filename_base, const char* directory, const char* folder, T* data, const int x_max, const int y_max, const int z_max, const int elements, const bool single_file )
{
char filename[256];
ofstream output_file;
int index;
int num_files = z_max;
int z_start = 0;
int z_end = 1;
if( single_file )
{
num_files = 1;
z_end = z_max;
}
for( int file = 0; file < num_files; file++)
{
if( num_files == z_max )
sprintf( filename, "%s%s/%s_%d.txt", directory, folder, filename_base, file );
else
sprintf( filename, "%s%s/%s.txt", directory, folder, filename_base );
output_file.open(filename);
for(int z = z_start; z < z_end; z++)
{
for(int y = 0; y < y_max; y++)
{
for(int x = 0; x < x_max; x++)
{
index = x + ( y * x_max ) + ( z * x_max * y_max );
if( index >= elements )
break;
output_file << data[index] << " ";
}
if( index >= elements )
break;
output_file << endl;
}
if( index >= elements )
break;
}
z_start += 1;
z_end += 1;
output_file.close();
}
}
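// Same as write_array_to_disk, but for std::vector data; the number of elements is taken from data.size().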
template<typename T> void write_vector_to_disk( char* filename_base, const char* directory, const char* folder, vector<T> data, const int x_max, const int y_max, const int z_max, const bool single_file )
{
char filename[256];
ofstream output_file;
int elements = data.size();
int index;
int num_files = z_max;
int z_start = 0;
int z_end = 1;
if( single_file )
{
num_files = 1;
z_end = z_max;
}
for( int file = 0; file < num_files; file++)
{
if( num_files == z_max )
sprintf( filename, "%s%s/%s_%d.txt", directory, folder, filename_base, file );
else
sprintf( filename, "%s%s/%s.txt", directory, folder, filename_base );
output_file.open(filename);
for(int z = z_start; z < z_end; z++)
{
for(int y = 0; y < y_max; y++)
{
for(int x = 0; x < x_max; x++)
{
index = x + ( y * x_max ) + ( z * x_max * y_max );
if( index >= elements )
break;
output_file << data[index] << " ";
}
if( index >= elements )
break;
output_file << endl;
}
if( index >= elements )
break;
}
z_start += 1;
z_end += 1;
output_file.close();
}
}
/********************************************************************* Helper Functions *********************************************************************/
/************************************************************************************************************************************************************/
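// Return true if the given gantry angle appears in the hard-coded list of angles flagged as having bad data.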
bool bad_data_angle( const int angle )
{
static const int bad_angles_array[] = {80, 84, 88, 92, 96, 100, 00, 180, 260, 264, 268, 272, 276};
vector<int> bad_angles(bad_angles_array, bad_angles_array + sizeof(bad_angles_array) / sizeof(bad_angles_array[0]) );
bool bad_angle = false;
for( int i = 0; i < bad_angles.size(); i++ )
if( angle == bad_angles[i] )
bad_angle = true;
return bad_angle;
}
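// Convert physical positions (origin at the image center, in the same units as the voxel dimensions) to voxel column/row/slice indices.
// The y and z coordinates increase opposite to the row/slice indices, hence the ( range - position ) form used below.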
int calculate_x_voxel(const float x_position, const int x_voxels, const float voxel_width )
{
// -10 100 1 [-50 49] -40
float x_width = x_voxels * voxel_width;//100
float x_range = x_width/2;//50
return ( x_position + x_range) / voxel_width;//-10+50/1 = 40
//[0 99]
}
int calculate_y_voxel(const float y_position, const int y_voxels, const float voxel_height )
{
// 10 100 1 [-50 49] 40
float y_width = y_voxels * voxel_height;//100
float y_range = y_width/2;//50
return ( y_range - y_position ) / voxel_height;
}
int calculate_slice(const float z_position, const int z_voxels, const float voxel_thickness )
{
// -10 100 1 [-50 49] -40
float z_width = z_voxels * voxel_thickness;//100
float z_range = z_width/2;//50
return ( z_range - z_position ) / voxel_thickness;
}
void early_exit_if( bool early_exit)
{
if( early_exit )
{
char user_response[20];
puts("Hit enter to stop...");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
}
void start_execution_timing()
{
start_time = clock();
}
void stop_execution_timing()
{
end_time = clock();
execution_time = (end_time - start_time) / CLOCKS_PER_SEC;
printf( "Total execution time : %3f\n", double(execution_time) );
}
/************************************************************************************************************************************************************/
/****************************************************************** Testing Functions ***********************************************************************/
/************************************************************************************************************************************************************/
void test_func()
{
//char user_response[20];
//initialize_MSC_hull(MSC_image_h, MSC_image_d);
////fgets(user_response, sizeof(user_response), stdin);
//dim3 dimBlock( SLICES );
//dim3 dimGrid( COLUMNS, ROWS );
//test_func_GPU<<< dimGrid, dimBlock >>>( MSC_image_d );
//hipMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost);
////write_array_to_disk( "test", output_directory, output_folder, image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
//for( int i = 0; i < 20; i++ )
// cout << MSC_image_h[i] << endl;
cout << CLOCKS_PER_SEC << endl;
/*int num_elements = 5;
int init = 0;
int* series1 = (int*) calloc( num_elements, sizeof(int) );
int* series2 = (int*) calloc( num_elements, sizeof(int) );
for( int i = 0; i < num_elements; i++ )
{
series1[i] = i;
series2[i] = i;
}
int series[5] = {1, 2, 3, 4, 5 };
vector<int> vec (series, series + sizeof(series) / sizeof(int) );
int result = inner_product(series1, series1 + num_elements, series2, init );
cout << result << endl;*/
}
__global__ void test_func( int* image )
{
int voxel_x = blockIdx.x;
int voxel_y = blockIdx.y;
int voxel_z = threadIdx.x;
int voxel = voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS;
int x = 0, y = 0, z = 0;
test_func_device( x, y, z );
image[voxel] = x * y * z;
}
__device__ void test_func_device( int& x, int& y, int& z )
{
x = 2;
y = 3;
z = 4;
} | 0a1d636461d2d6954509c26418756d8093a34322.cu | //********************************************************************************************************************************************************//
//*********************************************** Proton CT Preprocessing and Image Reconstruction Code *************************************************//
//********************************************************************************************************************************************************//
#include "pCT_Reconstruction.h"
//********************************************************************************************************************************************************//
//********************************************************************** Host Code ***********************************************************************//
//********************************************************************************************************************************************************//
// Preprocessing setup and initializations
void assign_SSD_positions();
void initializations();
void count_histories();
void count_histories_old();
void count_histories_v0();
void count_histories_v1();
void reserve_vector_capacity();
// Preprocessing routines
void read_data_chunk( const int, const int, const int );
void read_data_chunk_old( const int, const int, const int );
void read_data_chunk_v0( const int, const int, const int );
void read_data_chunk_v1( const int, const int, const int );
void recon_volume_intersections( const int );
void bin_valid_histories( const int );
void calculate_means();
void sum_squared_deviations( const int, const int );
void calculate_standard_deviations();
void statistical_cuts( const int, const int );
void initialize_sinogram();
void construct_sinogram();
void filter();
void backprojection();
// Hull-Detection
void hull_detection_initializations();
void hull_detection( int );
void hull_detection_finish();
void initialize_SC_hull( bool*&, bool*& );
void initialize_MSC_hull( int*&, int*& );
void initialize_SM_hull( int*&, int*& );
void initialize_float_image( float*&, float*& );
void SC( int );
void MSC( int );
void MSC_threshold();
void SM( int );
void SM_threshold();
void SM_threshold_2();
void averaging_filter( bool*&, bool*&, const int);
// MLP: IN DEVELOPMENT
void create_MLP_test_image();
void MLP_test();
void MLP();
void MLP_entry_exit( int&, int&, int& );
float mean_chord_length( float, float );
// Write arrays/vectors to file(s)
template<typename T> void write_array_to_disk( char*, const char*, const char*, T*, const int, const int, const int, const int, const bool );
template<typename T> void write_vector_to_disk( char*, const char*, const char*, vector<T>, const int, const int, const int, const bool );
// Memory transfers and allocations/deallocations
void post_cut_memory_clean();
void resize_vectors( const int );
void shrink_vectors( const int );
void initial_processing_memory_clean();
// Helper Functions
bool bad_data_angle( const int );
int calculate_x_voxel( const float, const int, const float );
int calculate_y_voxel( const float, const int, const float );
int calculate_slice( const float, const int, const float );
void early_exit_if( bool );
void start_execution_timing();
void stop_execution_timing();
// New routine test functions
void test_func();
//********************************************************************************************************************************************************//
//****************************************************************** Device (GPU) Code *******************************************************************//
//********************************************************************************************************************************************************//
// Preprocessing routines
__global__ void recon_volume_intersections_GPU( int, int*, bool*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*);
__global__ void bin_valid_histories_GPU( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float* );
__global__ void calculate_means_GPU( int*, float*, float*, float* );
__global__ void sum_squared_deviations_GPU( int, int*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float* );
__global__ void calculate_standard_deviations_GPU( int*, float*, float*, float* );
__global__ void statistical_cuts_GPU( int, int*, int*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, bool*, float*, float* );
__global__ void construct_sinogram_GPU( int*, float* );
__global__ void filter_GPU( float*, float* );
// Hull-Detection
__device__ void voxel_walk( bool*&, float, float, float, float, float, float );
__global__ void SC_GPU( int, bool*, int*, bool*, float*, float*, float*, float*, float*, float*, float* );
__global__ void MSC_GPU( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float* );
__global__ void SM_GPU( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float* );
__global__ void MSC_threshold_GPU( int* );
__global__ void SM_threshold_GPU( int*, int* );
__global__ void SM_threshold_GPU_2( int*, int* );
__global__ void carve_differences( int*, int* );
__global__ void averaging_filter_GPU( bool*, const int, const float );
// New routine test functions
__global__ void test_func_GPU( int*);
__device__ void test_func_device( int&, int&, int&);
/************************************************************************************************************************************************************/
/******************************************************************** Program Main **************************************************************************/
/************************************************************************************************************************************************************/
int main(int argc, char** argv)
{
char user_response[20];
test_func();
if( RUN_ON )
{
/********************************************************************************************************************************************************/
/* Start the execution timing clock */
/********************************************************************************************************************************************************/
start_execution_timing();
/********************************************************************************************************************************************************/
/* Initialize hull detection images and transfer them to the GPU (performed if SC_ON, MSC_ON, or SM_ON is true) */
/********************************************************************************************************************************************************/
hull_detection_initializations();
/********************************************************************************************************************************************************/
/* Read the u-coordinates of the detector planes from the config file, allocate and initialize statistical data arrays, and count the number of */
/* histories per file, projection, gantry angle, scan, and total. */
/********************************************************************************************************************************************************/
if( DATA_FORMAT == -1 )
assign_SSD_positions(); // Read the detector plane u-coordinates from config file
initializations(); // allocate and initialize host and GPU memory for binning
count_histories(); // count the number of histories per file, per scan, total, etc.
switch( DATA_FORMAT )
{
case -1 : count_histories_old(); break;
case 0 : count_histories_v0(); break;
case 1 : count_histories_v1(); break;
}
/********************************************************************************************************************************************************/
/* Iteratively Read and Process Data One Chunk at a Time. There are at Most MAX_GPU_HISTORIES Per Chunk (i.e. Iteration). On Each Iteration: */
/* (1) Read data from file */
/* (2) Determine which histories traverse the reconstruction volume and store this information in a boolean array */
/* (3) Determine which bin each history belongs to */
/* (4) Use the boolean array to determine which histories to keep and then push the intermediate data from these histories onto the permanent */
/* storage vectors */
/* (5) Free up temporary host/GPU array memory allocated during iteration */
/********************************************************************************************************************************************************/
puts("Iteratively Reading Data from Hard Disk");
puts("Removing Proton Histories that Don't Pass Through the Reconstruction Volume");
puts("Binning the Data from Those that Did...");
int start_file_num = 0, end_file_num = 0, histories_to_process = 0;
while( start_file_num != NUM_FILES )
{
while( end_file_num < NUM_FILES )
{
if( histories_to_process + histories_per_file[end_file_num] < MAX_GPU_HISTORIES )
histories_to_process += histories_per_file[end_file_num];
else
break;
end_file_num++;
}
read_data_chunk( histories_to_process, start_file_num, end_file_num );
recon_volume_intersections( histories_to_process );
bin_valid_histories( histories_to_process );
hull_detection( histories_to_process );
initial_processing_memory_clean();
start_file_num = end_file_num;
histories_to_process = 0;
}
puts("Data reading complete.");
early_exit_if( EXIT_AFTER_BINNING );
/********************************************************************************************************************************************************/
/* Reduce vector capacities to their size, the number of histories remaining after histories that didn't intersect the reconstruction volume were ignored */
/********************************************************************************************************************************************************/
shrink_vectors( recon_vol_histories );
/********************************************************************************************************************************************************/
/* Perform thresholding on MSC and SM hulls and write all hull images to file */
/********************************************************************************************************************************************************/
hull_detection_finish();
early_exit_if( EXIT_AFTER_HULL_DETECTION );
/********************************************************************************************************************************************************/
/* Calculate the mean WEPL, relative ut-angle, and relative uv-angle for each bin and count the number of histories in each bin */
/********************************************************************************************************************************************************/
calculate_means();
/********************************************************************************************************************************************************/
/* Calculate the standard deviation in WEPL, relative ut-angle, and relative uv-angle for each bin. Iterate through the valid history vectors one */
/* chunk at a time, with at most MAX_GPU_HISTORIES per chunk, and calculate the difference between the mean WEPL and WEPL, mean relative ut-angle and */
/* relative ut-angle, and mean relative uv-angle and relative uv-angle for each history. The standard deviation is then found by calculating the sum */
/* of these differences for each bin and dividing it by the number of histories in the bin */
/********************************************************************************************************************************************************/
puts("Calculating the cumulative sum of the squared deviation in WEPL and relative ut/uv angles over all histories for each bin...");
int remaining_histories = recon_vol_histories;
int start_position = 0;
while( remaining_histories > 0 )
{
if( remaining_histories > MAX_GPU_HISTORIES )
histories_to_process = MAX_GPU_HISTORIES;
else
histories_to_process = remaining_histories;
sum_squared_deviations( start_position, histories_to_process );
remaining_histories -= MAX_GPU_HISTORIES;
start_position += MAX_GPU_HISTORIES;
} // end: sum_squared_deviations loop
calculate_standard_deviations();
/********************************************************************************************************************************************************/
/* Allocate host memory for the sinogram, initialize it to zeros, allocate memory for it on the GPU, then transfer the initialized sinogram to the GPU */
/********************************************************************************************************************************************************/
initialize_sinogram();
/********************************************************************************************************************************************************/
/* Iterate through the valid history vectors one chunk at a time, with at most MAX_GPU_HISTORIES per chunk, and perform statistical cuts */
/********************************************************************************************************************************************************/
puts("Performing statistical cuts...");
remaining_histories = recon_vol_histories, start_position = 0;
while( remaining_histories > 0 )
{
if( remaining_histories > MAX_GPU_HISTORIES )
histories_to_process = MAX_GPU_HISTORIES;
else
histories_to_process = remaining_histories;
statistical_cuts( start_position, histories_to_process );
remaining_histories -= MAX_GPU_HISTORIES;
start_position += MAX_GPU_HISTORIES;
}
puts("Statistical cuts complete...");
printf("%d out of %d (%4f) histories passed cuts\n", post_cut_histories, total_histories, double( post_cut_histories / total_histories * 100 ) );
/********************************************************************************************************************************************************/
/* Free host memory for bin number array, free GPU memory for the statistics arrays, and shrink vectors to the number of histories that passed cuts */
/********************************************************************************************************************************************************/
post_cut_memory_clean();
resize_vectors( post_cut_histories );
shrink_vectors( post_cut_histories );
early_exit_if( EXIT_AFTER_STATISTICAL_CUTS );
/********************************************************************************************************************************************************/
/* Recalculate the mean WEPL for each bin using the histories remaining after cuts and use these to produce the sinogram */
/********************************************************************************************************************************************************/
construct_sinogram();
/********************************************************************************************************************************************************/
/* Perform filtered backprojection and write FBP hull to disk */
/********************************************************************************************************************************************************/
if( FBP_ON )
{
filter();
backprojection();
}
early_exit_if( EXIT_AFTER_FBP );
/********************************************************************************************************************************************************/
/* End program execution timing clock and print the total execution time to console window */
/********************************************************************************************************************************************************/
stop_execution_timing();
}
/************************************************************************************************************************************************************/
/* Program has finished execution. Require the user to hit the enter key to terminate the program and close the terminal/console window */
/************************************************************************************************************************************************************/
puts("Preprocessing complete. Press any key to close the console window...");
fgets(user_response, sizeof(user_response), stdin);
}
/************************************************************************************************************************************************************/
/******************************************************** Preprocessing Setup and Initializations ***********************************************************/
/************************************************************************************************************************************************************/
void assign_SSD_positions() //HERE THE COORDINATES OF THE DETECTORS PLANES ARE LOADED, THE CONFIG FILE IS CREATED BY FORD (RWS)
{
char user_response[20];
char configFilename[512];
puts("Reading tracker plane positions...");
sprintf(configFilename, "%s%s\\scan.cfg", input_directory, input_folder);
if( DEBUG_TEXT_ON )
printf("Opening config file %s...\n", configFilename);
ifstream configFile(configFilename);
if( !configFile.is_open() ) {
printf("ERROR: config file not found at %s!\n", configFilename);
fputs("Didn't Find File", stdout);
fflush(stdout);
printf("text = \"%s\"\n", user_response);
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
else
{
fputs("Found File", stdout);
fflush(stdout);
printf("user_response = \"%s\"\n", user_response);
}
if( DEBUG_TEXT_ON )
puts("Reading Tracking Plane Positions...");
for( int i = 0; i < 8; i++ ) {
configFile >> SSD_u_Positions[i];
if( DEBUG_TEXT_ON )
printf("SSD_u_Positions[%d] = %3f", i, SSD_u_Positions[i]);
}
configFile.close();
}
void initializations()
{
puts("Allocating statistical analysis arrays on host/GPU and counting proton histories...");
for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
histories_per_scan[scan_number] = 0;
histories_per_file = (int*) calloc( NUM_SCANS * GANTRY_ANGLES, sizeof(int) );
histories_per_gantry_angle = (int*) calloc( GANTRY_ANGLES, sizeof(int) );
recon_vol_histories_per_projection = (int*) calloc( GANTRY_ANGLES, sizeof(int) );
bin_counts_h = (int*) calloc( NUM_BINS, sizeof(int) );
mean_WEPL_h = (float*) calloc( NUM_BINS, sizeof(float) );
mean_rel_ut_angle_h = (float*) calloc( NUM_BINS, sizeof(float) );
mean_rel_uv_angle_h = (float*) calloc( NUM_BINS, sizeof(float) );
stddev_rel_ut_angle_h = (float*) calloc( NUM_BINS, sizeof(float) );
stddev_rel_uv_angle_h = (float*) calloc( NUM_BINS, sizeof(float) );
stddev_WEPL_h = (float*) calloc( NUM_BINS, sizeof(float) );
cudaMalloc((void**) &bin_counts_d, MEM_SIZE_BINS_INTS );
cudaMalloc((void**) &mean_WEPL_d, MEM_SIZE_BINS_FLOATS );
cudaMalloc((void**) &mean_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS );
cudaMalloc((void**) &mean_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS );
cudaMalloc((void**) &stddev_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS );
cudaMalloc((void**) &stddev_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS );
cudaMalloc((void**) &stddev_WEPL_d, MEM_SIZE_BINS_FLOATS );
cudaMemcpy( bin_counts_d, bin_counts_h, MEM_SIZE_BINS_INTS, cudaMemcpyHostToDevice );
cudaMemcpy( mean_WEPL_d, mean_WEPL_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice );
cudaMemcpy( mean_rel_ut_angle_d, mean_rel_ut_angle_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice );
cudaMemcpy( mean_rel_uv_angle_d, mean_rel_uv_angle_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice );
cudaMemcpy( stddev_rel_ut_angle_d, stddev_rel_ut_angle_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice );
cudaMemcpy( stddev_rel_uv_angle_d, stddev_rel_uv_angle_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice );
cudaMemcpy( stddev_WEPL_d, stddev_WEPL_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice );
}
void count_histories()
{
switch( DATA_FORMAT )
{
case -1 : count_histories_old(); break;
case 0 : count_histories_v0(); break;
case 1 : count_histories_v1(); break;
}
}
void count_histories_old()
{
if( DEBUG_TEXT_ON )
printf("Counting histories...\n");
char user_response[20];
char data_filename[128];
int file_size, num_histories, file_number = 0, gantry_position_number = 0;
for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ )
{
for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ )
{
sprintf( data_filename, "%s%s/%s_trans%d_%03d%s", input_directory, input_folder, input_base_name, scan_number, gantry_angle, file_extension );
//printf("Name = %s", data_filename );
FILE *data_file = fopen(data_filename, "rb");
if( data_file == NULL )
{
fputs( "Error Opening Data File: Check that the directories are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
fseek( data_file, 0, SEEK_END );
file_size = ftell( data_file );
if( BINARY_ENCODING )
{
if( file_size % BYTES_PER_HISTORY )
{
printf("ERROR! Problem with bytes_per_history!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(2);
}
num_histories = file_size / BYTES_PER_HISTORY;
}
else
num_histories = file_size;
fclose(data_file);
histories_per_file[file_number] = num_histories;
histories_per_gantry_angle[gantry_position_number] += num_histories;
histories_per_scan[scan_number-1] += num_histories;
total_histories += num_histories;
if( DEBUG_TEXT_ON )
printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number);
}
}
if( DEBUG_TEXT_ON )
{
for( int file_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++ )
{
if( file_number % NUM_SCANS == 0 )
printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[file_number / NUM_SCANS], int( ( file_number / NUM_SCANS ) * GANTRY_ANGLE_INTERVAL ) );
printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 );
}
for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1);
printf("There are a Total of %d Histories\n", total_histories);
}
}
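// Count histories for version 0 data files: each file begins with the 4-byte magic number "PCTD" and an integer format version; for
// version 0, the next integer is the number of events in the file, which is accumulated per file, per gantry angle, per scan, and in total.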
void count_histories_v0()
{
if( DEBUG_TEXT_ON )
puts("Counting histories...\n");
char user_response[20];
char data_filename[256];
int num_histories, file_number = 0, gantry_position_number = 0;
for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ )
{
for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ )
{
sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension );
//cout << data_filename << endl;
ifstream data_file(data_filename, ios::binary);
if( !data_file.is_open() )
{
fputs( "File not found: Check that the directories and files are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
char magic_number[5];
data_file.read(magic_number, 4);
magic_number[4] = '\0';
if( strcmp(magic_number, "PCTD") ) {
puts("Error: unknown file type (should be PCTD)!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
int version_id;
data_file.read((char*)&version_id, sizeof(int));
if( version_id == 0 )
{
data_file.read((char*)&num_histories, sizeof(int));
data_file.close();
histories_per_file[file_number] = num_histories;
histories_per_gantry_angle[gantry_position_number] += num_histories;
histories_per_scan[scan_number-1] += num_histories;
total_histories += num_histories;
if( DEBUG_TEXT_ON )
printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number);
}
else
{
printf("ERROR: Unsupported format version (%d)!\n", version_id);
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
}
}
if( DEBUG_TEXT_ON )
{
for( int file_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++ )
{
if( file_number % NUM_SCANS == 0 )
printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[file_number / NUM_SCANS], int( ( file_number / NUM_SCANS ) * GANTRY_ANGLE_INTERVAL ) );
printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 );
}
for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1);
printf("There are a Total of %d Histories\n", total_histories);
}
// The GPU cannot process all the histories at once, so they are broken up into chunks that can fit on the GPU. As we iterate
// through the data one chunk at a time, we determine which histories enter the reconstruction volume and if they belong to a
// valid bin (i.e. t, v, and angular bin number is greater than zero and less than max). If both are true, we append the bin
// number, WEPL, and relative entry/exit ut/uv angles to the following four arrays. We do not know ahead of time how many
// valid histories there will be, so memory is allocated to accommodate every history and the actual number of valid histories
// is counted. Although we waste some host memory, we can avoid writing intermediate information to file or keeping the raw
// data and recalculating it every time it's needed. Once all the data is processed and we know how many valid histories we
// have, we simply ignore the illegitimate elements of the four arrays to avoid transferring invalid and unnecessary data to
// and from the GPU.
}
void count_histories_v1()
{
if( DEBUG_TEXT_ON )
printf("Counting histories...\n");
char user_response[20];
char data_filename[128];
int file_size, num_histories, file_number = 0, gantry_position_number = 0;
for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ )
{
for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ )
{
sprintf(data_filename, "%s%s/%s_%03d%%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension );
FILE *data_file = fopen(data_filename, "rb");
if( data_file == NULL )
{
fputs( "Error Opening Data File: Check that the directories are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
fseek( data_file, 0, SEEK_END );
file_size = ftell( data_file );
if( BINARY_ENCODING )
{
if( file_size % BYTES_PER_HISTORY )
{
printf("ERROR! Problem with bytes_per_history!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(2);
}
num_histories = file_size / BYTES_PER_HISTORY;
}
else
num_histories = file_size;
fclose(data_file);
histories_per_file[file_number] = num_histories;
histories_per_gantry_angle[gantry_position_number] += num_histories;
histories_per_scan[scan_number-1] += num_histories;
total_histories += num_histories;
if( DEBUG_TEXT_ON )
printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number);
}
}
if( DEBUG_TEXT_ON )
{
for( int file_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++ )
{
if( file_number % NUM_SCANS == 0 )
printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[file_number / NUM_SCANS], int( ( file_number / NUM_SCANS ) * GANTRY_ANGLE_INTERVAL ) );
printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 );
}
for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1);
printf("There are a Total of %d Histories\n", total_histories);
}
// The GPU cannot process all the histories at once, so they are broken up into chunks that can fit on the GPU. As we iterate
// through the data one chunk at a time, we determine which histories enter the reconstruction volume and if they belong to a
// valid bin (i.e. t, v, and angular bin number is greater than zero and less than max). If both are true, we append the bin
// number, WEPL, and relative entry/exit ut/uv angles to the following four arrays. We do not know ahead of time how many
// valid histories there will be, so memory is allocated to accommodate every history and the actual number of valid histories
// is counted. Although we waste some host memory, we can avoid writing intermediate information to file or keeping the raw
// data and recalculating it every time it's needed. Once all the data is processed and we know how many valid histories we
// have, we simply ignore the illegitimate elements of the four arrays to avoid transferring invalid and unnecessary data to
// and from the GPU.
}
void reserve_vector_capacity()
{
bin_num_vector.reserve( total_histories );
//gantry_angle_vector.reserve( total_histories );
WEPL_vector.reserve( total_histories );
x_entry_vector.reserve( total_histories );
y_entry_vector.reserve( total_histories );
z_entry_vector.reserve( total_histories );
x_exit_vector.reserve( total_histories );
y_exit_vector.reserve( total_histories );
z_exit_vector.reserve( total_histories );
xy_entry_angle_vector.reserve( total_histories );
xz_entry_angle_vector.reserve( total_histories );
//xy_exit_angle_vector.reserve( total_histories );
//xz_exit_angle_vector.reserve( total_histories );
relative_ut_angle_vector.reserve( total_histories );
relative_uv_angle_vector.reserve( total_histories );
}
/************************************************************************************************************************************************************/
/********************************************************* Data Importation, Initial Cuts, and Binning ******************************************************/
/************************************************************************************************************************************************************/
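// Dispatch to the reader for the configured DATA_FORMAT (-1 = old format, 0 = version 0, 1 = version 1). The end file index is treated as
// exclusive here, so the workers receive end_file_num - 1 and loop inclusively.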
void read_data_chunk( const int histories_to_process, const int start_file_num, const int end_file_num )
{
switch( DATA_FORMAT )
{
case -1 : read_data_chunk_old( histories_to_process, start_file_num, end_file_num - 1 ); break;
case 0 : read_data_chunk_v0( histories_to_process, start_file_num, end_file_num - 1 ); break;
case 1 : read_data_chunk_v1( histories_to_process, start_file_num, end_file_num - 1 ); break;
}
}
void read_data_chunk_old( const int num_histories, const int start_file_num, const int end_file_num )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
t_in_1_h = (float*) malloc(mem_size_hist_floats);
t_in_2_h = (float*) malloc(mem_size_hist_floats);
t_out_1_h = (float*) malloc(mem_size_hist_floats);
t_out_2_h = (float*) malloc(mem_size_hist_floats);
u_in_1_h = (float*) malloc(mem_size_hist_floats);
u_in_2_h = (float*) malloc(mem_size_hist_floats);
u_out_1_h = (float*) malloc(mem_size_hist_floats);
u_out_2_h = (float*) malloc(mem_size_hist_floats);
v_in_1_h = (float*) malloc(mem_size_hist_floats);
v_in_2_h = (float*) malloc(mem_size_hist_floats);
v_out_1_h = (float*) malloc(mem_size_hist_floats);
v_out_2_h = (float*) malloc(mem_size_hist_floats);
WEPL_h = (float*) malloc(mem_size_hist_floats);
gantry_angle_h = (int*) malloc(mem_size_hist_ints);
int array_index = 0, gantry_position, gantry_angle, scan_number, scan_histories;
float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data;
char tracker_plane[4];
char data_filename[128];
FILE* data_file;
for( int file_num = start_file_num; file_num <= end_file_num; file_num++ )
{
gantry_position = file_num / NUM_SCANS;
gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL;
scan_number = file_num % NUM_SCANS + 1;
scan_histories = histories_per_file[file_num];
printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number );
sprintf( data_filename, "%s%s/%s_trans%d_%03d%s", input_directory, input_folder, input_base_name, scan_number, gantry_angle, file_extension );
data_file = fopen( data_filename, "rb" );
for( int history = 0; history < scan_histories; history++, array_index++ )
{
fread(&v_data, sizeof(float), 4, data_file);
fread(&t_data, sizeof(float), 4, data_file);
fread(&tracker_plane, sizeof(char), 4, data_file);
fread(&WEPL_data, sizeof(float), 1, data_file);
fread(&gantry_angle_data, sizeof(float), 1, data_file);
fread(&dummy_data, sizeof(float), 1, data_file); // dummy read because each event has an extra 4 bytes, for some reason
if( DATA_IN_MM )
{
// Convert the input data from mm to cm
v_in_1_h[array_index] = v_data[0] * 0.1;
v_in_2_h[array_index] = v_data[1] * 0.1;
v_out_1_h[array_index] = v_data[2] * 0.1;
v_out_2_h[array_index] = v_data[3] * 0.1;
t_in_1_h[array_index] = t_data[0] * 0.1;
t_in_2_h[array_index] = t_data[1] * 0.1;
t_out_1_h[array_index] = t_data[2] * 0.1;
t_out_2_h[array_index] = t_data[3] * 0.1;
WEPL_h[array_index] = WEPL_data * 0.1;
}
else
{
v_in_1_h[array_index] = v_data[0];
v_in_2_h[array_index] = v_data[1];
v_out_1_h[array_index] = v_data[2];
v_out_2_h[array_index] = v_data[3];
t_in_1_h[array_index] = t_data[0];
t_in_2_h[array_index] = t_data[1];
t_out_1_h[array_index] = t_data[2];
t_out_2_h[array_index] = t_data[3];
WEPL_h[array_index] = WEPL_data;
}
if( !MICAH_SIM )
{
u_in_1_h[array_index] = SSD_u_Positions[int(tracker_plane[0])];
u_in_2_h[array_index] = SSD_u_Positions[int(tracker_plane[1])];
u_out_1_h[array_index] = SSD_u_Positions[int(tracker_plane[2])];
u_out_2_h[array_index] = SSD_u_Positions[int(tracker_plane[3])];
}
else
{
u_in_1_h[array_index] = SSD_u_Positions[0];
u_in_2_h[array_index] = SSD_u_Positions[2];
u_out_1_h[array_index] = SSD_u_Positions[4];
u_out_2_h[array_index] = SSD_u_Positions[6];
}
if( SSD_IN_MM )
{
// Convert the tracking plane positions from mm to cm
u_in_1_h[array_index] *= 0.1;
u_in_2_h[array_index] *= 0.1;
u_out_1_h[array_index] *= 0.1;
u_out_2_h[array_index] *= 0.1;
}
gantry_angle_h[array_index] = int(gantry_angle_data);
}
fclose(data_file);
}
}
void read_data_chunk_v0( const int num_histories, const int start_file_num, const int end_file_num )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
t_in_1_h = (float*) malloc(mem_size_hist_floats);
t_in_2_h = (float*) malloc(mem_size_hist_floats);
t_out_1_h = (float*) malloc(mem_size_hist_floats);
t_out_2_h = (float*) malloc(mem_size_hist_floats);
u_in_1_h = (float*) malloc(mem_size_hist_floats);
u_in_2_h = (float*) malloc(mem_size_hist_floats);
u_out_1_h = (float*) malloc(mem_size_hist_floats);
u_out_2_h = (float*) malloc(mem_size_hist_floats);
v_in_1_h = (float*) malloc(mem_size_hist_floats);
v_in_2_h = (float*) malloc(mem_size_hist_floats);
v_out_1_h = (float*) malloc(mem_size_hist_floats);
v_out_2_h = (float*) malloc(mem_size_hist_floats);
WEPL_h = (float*) malloc(mem_size_hist_floats);
gantry_angle_h = (int*) malloc(mem_size_hist_ints);
if( WRITE_SSD_ANGLES )
{
ut_entry_angle = (float*) malloc(mem_size_hist_floats);
uv_entry_angle = (float*) malloc(mem_size_hist_floats);
ut_exit_angle = (float*) malloc(mem_size_hist_floats);
uv_exit_angle = (float*) malloc(mem_size_hist_floats);
}
/*
Contains the following headers:
Magic number identifier: "PCTD" (4-byte string)
Format version identifier (integer)
Number of events in file (integer)
Projection angle (float | degrees)
Beam energy (float | MeV)
Acquisition/generation date (integer | Unix time)
Pre-process date (integer | Unix time)
Phantom name or description (variable length string)
Data source (variable length string)
Prepared by (variable length string)
* Note on variable length strings: each variable length string should be preceded with an integer containing the number of characters in the string.
Event data:
Data is stored with all of one type in a consecutive row, meaning the first entries will be N t0 values, where N is the number of events in the file. Next will be N t1 values, etc. This more closely matches the data structure in memory.
Detector coordinates in mm relative to a phantom center, given in the detector coordinate system:
t0 (float * N)
t1 (float * N)
t2 (float * N)
t3 (float * N)
v0 (float * N)
v1 (float * N)
v2 (float * N)
v3 (float * N)
u0 (float * N)
u1 (float * N)
u2 (float * N)
u3 (float * N)
WEPL in mm (float * N)
*/
char user_response[20];
char data_filename[128];
//int array_index = 0;
for( int file_num = start_file_num; file_num <= end_file_num; file_num++ )
{
int gantry_position = file_num / NUM_SCANS;
int gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL;
int scan_number = file_num % NUM_SCANS + 1;
//int scan_histories = histories_per_file[file_num];
printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number );
sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension );
ifstream data_file(data_filename, ios::binary);
if( !data_file.is_open() )
{
fputs( "File not found: Check that the directories and files are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
char magic_number[5];
data_file.read(magic_number, 4);
magic_number[4] = '\0';
if( strcmp(magic_number, "PCTD") ) {
puts("Error: unknown file type (should be PCTD)!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
int version_id;
data_file.read((char*)&version_id, sizeof(int));
if( version_id == 0 )
{
int file_histories;
data_file.read((char*)&file_histories, sizeof(int));
puts("Reading headers from file...\n");
float projection_angle, beam_energy;
int generation_date, preprocess_date;
int phantom_name_size, data_source_size, prepared_by_size;
char *phantom_name, *data_source, *prepared_by;
data_file.read((char*)&projection_angle, sizeof(float));
data_file.read((char*)&beam_energy, sizeof(float));
data_file.read((char*)&generation_date, sizeof(int));
data_file.read((char*)&preprocess_date, sizeof(int));
data_file.read((char*)&phantom_name_size, sizeof(int));
phantom_name = (char*)malloc(phantom_name_size);
data_file.read(phantom_name, phantom_name_size);
data_file.read((char*)&data_source_size, sizeof(int));
data_source = (char*)malloc(data_source_size);
data_file.read(data_source, data_source_size);
data_file.read((char*)&prepared_by_size, sizeof(int));
prepared_by = (char*)malloc(prepared_by_size);
data_file.read(prepared_by, prepared_by_size);
printf("Loading %d histories from file\n", num_histories);
int data_size = num_histories * sizeof(float);
data_file.read((char*)t_in_1_h, data_size);
data_file.read((char*)t_in_2_h, data_size);
data_file.read((char*)t_out_1_h, data_size);
data_file.read((char*)t_out_2_h, data_size);
data_file.read((char*)v_in_1_h, data_size);
data_file.read((char*)v_in_2_h, data_size);
data_file.read((char*)v_out_1_h, data_size);
data_file.read((char*)v_out_2_h, data_size);
data_file.read((char*)u_in_1_h, data_size);
data_file.read((char*)u_in_2_h, data_size);
data_file.read((char*)u_out_1_h, data_size);
data_file.read((char*)u_out_2_h, data_size);
data_file.read((char*)WEPL_h, data_size);
//float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data;
for( int i = 0; i < num_histories; i++ )
{
if( DATA_IN_MM )
{
// Convert the input data from mm to cm
v_in_1_h[i] *= 0.1;
v_in_2_h[i] *= 0.1;
v_out_1_h[i] *= 0.1;
v_out_2_h[i] *= 0.1;
t_in_1_h[i] *= 0.1;
t_in_2_h[i] *= 0.1;
t_out_1_h[i] *= 0.1;
t_out_2_h[i] *= 0.1;
WEPL_h[i] *= 0.1;
if( WEPL_h[i] < 0 )
printf("WEPL[%d] = %3f\n", i, WEPL_h[i] );
u_in_1_h[i] *= 0.1;
u_in_2_h[i] *= 0.1;
u_out_1_h[i] *= 0.1;
u_out_2_h[i] *= 0.1;
if( WRITE_SSD_ANGLES )
{
ut_entry_angle[i] = atan2f( t_in_2_h[i] - t_in_1_h[i], u_in_2_h[i] - u_in_1_h[i] );
uv_entry_angle[i] = atan2f( v_in_2_h[i] - v_in_1_h[i], u_in_2_h[i] - u_in_1_h[i] );
ut_exit_angle[i] = atan2f( t_out_2_h[i] - t_out_1_h[i], u_out_2_h[i] - u_out_1_h[i] );
uv_exit_angle[i] = atan2f( v_out_2_h[i] - v_out_1_h[i], u_out_2_h[i] - u_out_1_h[i] );
}
}
gantry_angle_h[i] = int(projection_angle);
}
data_file.close();
if( WRITE_SSD_ANGLES )
{
sprintf(data_filename, "%s_%03d%s", "ut_entry_angle", gantry_angle, ".txt" );
write_array_to_disk( data_filename, output_directory, output_folder, ut_entry_angle, COLUMNS, ROWS, SLICES, file_histories, true );
sprintf(data_filename, "%s_%03d%s", "uv_entry_angle", gantry_angle, ".txt" );
write_array_to_disk( "ut_entry_angle", output_directory, output_folder, uv_entry_angle, COLUMNS, ROWS, SLICES, file_histories, true );
sprintf(data_filename, "%s_%03d%s", "ut_exit_angle", gantry_angle, ".txt" );
write_array_to_disk( "ut_entry_angle", output_directory, output_folder, ut_exit_angle, COLUMNS, ROWS, SLICES, file_histories, true );
sprintf(data_filename, "%s_%03d%s", "uv_exit_angle", gantry_angle, ".txt" );
write_array_to_disk( "ut_entry_angle", output_directory, output_folder, uv_exit_angle, COLUMNS, ROWS, SLICES, file_histories, true );
}
}
}
}
void read_data_chunk_v1( const int num_histories, const int start_file_num, const int end_file_num ){
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
t_in_1_h = (float*) malloc(mem_size_hist_floats);
t_in_2_h = (float*) malloc(mem_size_hist_floats);
t_out_1_h = (float*) malloc(mem_size_hist_floats);
t_out_2_h = (float*) malloc(mem_size_hist_floats);
u_in_1_h = (float*) malloc(mem_size_hist_floats);
u_in_2_h = (float*) malloc(mem_size_hist_floats);
u_out_1_h = (float*) malloc(mem_size_hist_floats);
u_out_2_h = (float*) malloc(mem_size_hist_floats);
v_in_1_h = (float*) malloc(mem_size_hist_floats);
v_in_2_h = (float*) malloc(mem_size_hist_floats);
v_out_1_h = (float*) malloc(mem_size_hist_floats);
v_out_2_h = (float*) malloc(mem_size_hist_floats);
WEPL_h = (float*) malloc(mem_size_hist_floats);
gantry_angle_h = (int*) malloc(mem_size_hist_ints);
/*
Contains the following headers:
Magic number identifier: "PCTD" (4-byte string)
Format version identifier (integer)
Number of events in file (integer)
Projection angle (float | degrees)
Beam energy (float | MeV)
Acquisition/generation date (integer | Unix time)
Pre-process date (integer | Unix time)
Phantom name or description (variable length string)
Data source (variable length string)
Prepared by (variable length string)
* Note on variable length strings: each variable length string should be preceded with an integer containing the number of characters in the string.
Event data:
Data is stored with all values of one type in a consecutive block: the first entries will be N t0 values, where N is the number of events in the file, followed by N t1 values, etc. This more closely matches the data structure in memory.
Detector coordinates in mm relative to a phantom center, given in the detector coordinate system:
t0 (float * N)
t1 (float * N)
t2 (float * N)
t3 (float * N)
v0 (float * N)
v1 (float * N)
v2 (float * N)
v3 (float * N)
u0 (float * N)
u1 (float * N)
u2 (float * N)
u3 (float * N)
WEPL in mm (float * N)
*/
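/*
	Illustrative sketch only (not used by this program): the fixed-size portion of the version-0 header
	described above maps naturally onto a plain struct, which can make the read sequence below easier to
	follow. The struct and field names here are hypothetical; the three variable-length strings must still
	be read separately, since each is preceded by an integer character count.

		struct PCTD_header_v0
		{
			int   num_file_histories;	// number of events in file
			float projection_angle;		// degrees
			float beam_energy;			// MeV
			int   generation_date;		// Unix time
			int   preprocess_date;		// Unix time
		};
*/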
char user_response[20];
char data_filename[128];
//int array_index = 0;
for( int file_num = start_file_num; file_num <= end_file_num; file_num++ )
{
int gantry_position = file_num / NUM_SCANS;
int gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL;
int scan_number = file_num % NUM_SCANS + 1;
//int scan_histories = histories_per_file[file_num];
printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number );
sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension );
ifstream data_file(data_filename, ios::binary);
if( !data_file.is_open() )
{
fputs( "File not found: Check that the directories and files are properly named.", stderr );
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
char magic_number[5];
data_file.read(magic_number, 4);
magic_number[4] = '\0';
if( strcmp(magic_number, "PCTD") ) {
puts("Error: unknown file type (should be PCTD)!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
int version_id;
data_file.read((char*)&version_id, sizeof(int));
if( version_id == 0 )
{
int num_histories;	// NOTE: shadows the function parameter of the same name; the reads below use the event count reported by the file header
data_file.read((char*)&num_histories, sizeof(int));
puts("Reading headers from file...\n");
float projection_angle, beam_energy;
int generation_date, preprocess_date;
int phantom_name_size, data_source_size, prepared_by_size;
char *phantom_name, *data_source, *prepared_by;
data_file.read((char*)&projection_angle, sizeof(float));
data_file.read((char*)&beam_energy, sizeof(float));
data_file.read((char*)&generation_date, sizeof(int));
data_file.read((char*)&preprocess_date, sizeof(int));
data_file.read((char*)&phantom_name_size, sizeof(int));
phantom_name = (char*)malloc(phantom_name_size);
data_file.read(phantom_name, phantom_name_size);
data_file.read((char*)&data_source_size, sizeof(int));
data_source = (char*)malloc(data_source_size);
data_file.read(data_source, data_source_size);
data_file.read((char*)&prepared_by_size, sizeof(int));
prepared_by = (char*)malloc(prepared_by_size);
data_file.read(prepared_by, prepared_by_size);
printf("Loading %d histories from file\n", num_histories);
int data_size = num_histories * sizeof(float);
data_file.read((char*)t_in_1_h, data_size);
data_file.read((char*)t_in_2_h, data_size);
data_file.read((char*)t_out_1_h, data_size);
data_file.read((char*)t_out_2_h, data_size);
data_file.read((char*)v_in_1_h, data_size);
data_file.read((char*)v_in_2_h, data_size);
data_file.read((char*)v_out_1_h, data_size);
data_file.read((char*)v_out_2_h, data_size);
data_file.read((char*)u_in_1_h, data_size);
data_file.read((char*)u_in_2_h, data_size);
data_file.read((char*)u_out_1_h, data_size);
data_file.read((char*)u_out_2_h, data_size);
data_file.read((char*)WEPL_h, data_size);
//float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data;
for( int i = 0; i < num_histories; i++ )
{
if( DATA_IN_MM )
{
// Convert the input data from mm to cm
v_in_1_h[i] *= 0.1;
v_in_2_h[i] *= 0.1;
v_out_1_h[i] *= 0.1;
v_out_2_h[i] *= 0.1;
t_in_1_h[i] *= 0.1;
t_in_2_h[i] *= 0.1;
t_out_1_h[i] *= 0.1;
t_out_2_h[i] *= 0.1;
WEPL_h[i] *= 0.1;
if( WEPL_h[i] < 0 )
printf("WEPL[%d] = %3f\n", i, WEPL_h[i] );
u_in_1_h[i] *= 0.1;
u_in_2_h[i] *= 0.1;
u_out_1_h[i] *= 0.1;
u_out_2_h[i] *= 0.1;
}
gantry_angle_h[i] = int(projection_angle);
}
data_file.close();
}
}
}
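/*
	Minimal sketch, assuming nothing beyond the <fstream>/<cstring> facilities already used above; it is not
	referenced anywhere else in this program. The magic-number and version checks duplicated in both readers
	could be factored into a helper like this (the function name is hypothetical):
*/
bool verify_PCTD_header( ifstream& data_file, int& version_id )
{
	char magic_number[5];
	data_file.read( magic_number, 4 );
	magic_number[4] = '\0';
	if( strcmp( magic_number, "PCTD" ) != 0 )
		return false;
	data_file.read( (char*)&version_id, sizeof(int) );
	return data_file.good();
}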
void recon_volume_intersections( const int num_histories )
{
//printf("There are %d histories in this projection\n", num_histories );
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
unsigned int mem_size_hist_bool = sizeof(bool) * num_histories;
// Allocate GPU memory
cudaMalloc((void**) &t_in_1_d, mem_size_hist_floats);
cudaMalloc((void**) &t_in_2_d, mem_size_hist_floats);
cudaMalloc((void**) &t_out_1_d, mem_size_hist_floats);
cudaMalloc((void**) &t_out_2_d, mem_size_hist_floats);
cudaMalloc((void**) &u_in_1_d, mem_size_hist_floats);
cudaMalloc((void**) &u_in_2_d, mem_size_hist_floats);
cudaMalloc((void**) &u_out_1_d, mem_size_hist_floats);
cudaMalloc((void**) &u_out_2_d, mem_size_hist_floats);
cudaMalloc((void**) &v_in_1_d, mem_size_hist_floats);
cudaMalloc((void**) &v_in_2_d, mem_size_hist_floats);
cudaMalloc((void**) &v_out_1_d, mem_size_hist_floats);
cudaMalloc((void**) &v_out_2_d, mem_size_hist_floats);
cudaMalloc((void**) &WEPL_d, mem_size_hist_floats);
cudaMalloc((void**) &gantry_angle_d, mem_size_hist_ints);
cudaMalloc((void**) &x_entry_d, mem_size_hist_floats);
cudaMalloc((void**) &y_entry_d, mem_size_hist_floats);
cudaMalloc((void**) &z_entry_d, mem_size_hist_floats);
cudaMalloc((void**) &x_exit_d, mem_size_hist_floats);
cudaMalloc((void**) &y_exit_d, mem_size_hist_floats);
cudaMalloc((void**) &z_exit_d, mem_size_hist_floats);
cudaMalloc((void**) &xy_entry_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &xz_entry_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &relative_ut_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &relative_uv_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &traversed_recon_volume_d, mem_size_hist_bool);
cudaMemcpy(t_in_1_d, t_in_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(t_in_2_d, t_in_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(t_out_1_d, t_out_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(t_out_2_d, t_out_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(u_in_1_d, u_in_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(u_in_2_d, u_in_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(u_out_1_d, u_out_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(u_out_2_d, u_out_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(v_in_1_d, v_in_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(v_in_2_d, v_in_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(v_out_1_d, v_out_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(v_out_2_d, v_out_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
cudaMemcpy(gantry_angle_d, gantry_angle_h, mem_size_hist_ints, cudaMemcpyHostToDevice) ;
cudaMemcpy(WEPL_d, WEPL_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ;
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
recon_volume_intersections_GPU<<<dimGrid, dimBlock>>>
(
num_histories, gantry_angle_d, traversed_recon_volume_d, WEPL_d,
t_in_1_d, t_in_2_d, t_out_1_d, t_out_2_d,
u_in_1_d, u_in_2_d, u_out_1_d, u_out_2_d,
v_in_1_d, v_in_2_d, v_out_1_d, v_out_2_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d,
xy_entry_angle_d, xz_entry_angle_d, xy_exit_angle_d, xz_exit_angle_d,
relative_ut_angle_d, relative_uv_angle_d
);
free(t_in_1_h);
free(t_in_2_h);
free(v_in_1_h);
free(v_in_2_h);
free(u_in_1_h);
free(u_in_2_h);
free(t_out_1_h);
free(t_out_2_h);
free(v_out_1_h);
free(v_out_2_h);
free(u_out_1_h);
free(u_out_2_h);
cudaFree(t_in_1_d);
cudaFree(t_in_2_d);
cudaFree(v_in_1_d);
cudaFree(v_in_2_d);
cudaFree(u_in_1_d);
cudaFree(u_in_2_d);
cudaFree(t_out_1_d);
cudaFree(t_out_2_d);
cudaFree(v_out_1_d);
cudaFree(v_out_2_d);
cudaFree(u_out_1_d);
cudaFree(u_out_2_d);
cudaFree(gantry_angle_d);
}
__global__ void recon_volume_intersections_GPU
(
int num_histories, int* gantry_angle, bool* traversed_recon_volume, float* WEPL,
float* t_in_1, float* t_in_2, float* t_out_1, float* t_out_2,
float* u_in_1, float* u_in_2, float* u_out_1, float* u_out_2,
float* v_in_1, float* v_in_2, float* v_out_1, float* v_out_2,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit,
float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle,
float* relative_ut_angle, float* relative_uv_angle
)
{
/*
Determine if the proton path passes through the reconstruction volume (i.e. intersects the reconstruction
cylinder twice) and if it does, determine the x, y, and z positions in the global/object coordinate system where
the proton enters and exits the reconstruction volume. The origin of the object coordinate system is defined to
be at the center of the reconstruction cylinder so that its volume is bounded by:
-RECON_CYL_RADIUS <= x <= RECON_CYL_RADIUS
-RECON_CYL_RADIUS <= y <= RECON_CYL_RADIUS
-RECON_CYL_HEIGHT/2 <= z <= RECON_CYL_HEIGHT/2
First, the coordinates of the points where the proton path intersected the entry/exit detectors must be
calculated. Since the detectors record data in the detector coordinate system, data in the utv coordinate
system must be converted into the global/object coordinate system. The coordinate transformation can be
accomplished using a rotation matrix with an angle of rotation determined by the angle between the two
coordinate systems, which is the gantry_angle, in this case:
Rotate ut-coordinate system to xy-coordinate system
x = cos( gantry_angle ) * u - sin( gantry_angle ) * t
y = sin( gantry_angle ) * u + cos( gantry_angle ) * t
Rotate xy-coordinate system to ut-coordinate system
u = cos( gantry_angle ) * x + sin( gantry_angle ) * y
t = cos( gantry_angle ) * y - sin( gantry_angle ) * x
If a proton passes through the reconstruction volume, then the line defining its path in the
xy-plane will intersect the circle defining the boundary of the reconstruction cylinder in the xy-plane twice.
We can determine if the proton path passes through the reconstruction volume by equating the equations of the
proton path and the circle. This produces a second order polynomial which we must solve:
f(x)_proton = f(x)_cylinder
mx+b = sqrt(r^2 - x^2)
m^2x^2 + 2mbx + b^2 = r^2 - x^2
(m^2 + 1)x^2 + 2mbx + (b^2 - r^2) = 0
ax^2 + bx + c = 0
=> a = m^2 + 1
   b = 2 * m * b_in
   c = b_in^2 - r^2	( b_in = y-intercept of the proton path, r = RECON_CYL_RADIUS )
We can solve this using the quadratic formula ( [-b +/- sqrt(b^2-4ac)] / 2a ). If the proton passed through the
reconstruction volume, then the discriminant will be greater than zero ( b^2-4ac > 0 ) and the quadratic formula
will return two unique points of intersection. If the discriminant <= 0, the proton path does not pass through
the reconstruction volume and we need not determine intersection coordinates. When two intersection points are
returned, the one closest to the point where the entry/exit path intersected the corresponding detector plane is
taken as the entry/exit position on the reconstruction cylinder.
If the entry/exit path travels through the cone bounded by y = |x| and y = -|x|, the x coordinates will be small
and the difference between the entry and exit x coordinates will approach zero, causing instabilities in the trig
functions and slope calculations (the x difference appears in the denominator). To avoid these inaccuracies,
coordinates for such proton paths are rotated by PI/2 radians (90 degrees) prior to the calculations and rotated
back once they are completed, again using a rotation matrix transformation:
Positive Rotation By 90 Degrees
x' = cos( 90 ) * x - sin( 90 ) * y = -y
y' = sin( 90 ) * x + cos( 90 ) * y = x
Negative Rotation By 90 Degrees
x' = cos( 90 ) * x + sin( 90 ) * y = y
y' = cos( 90 ) * y - sin( 90 ) * x = -x
*/
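/*
	Worked numeric example of the intersection test described above (values chosen only for illustration):
	for a path with slope m = 1 and y-intercept b_in = 2 crossing a cylinder of radius r = 10,
		a = m^2 + 1        = 2
		b = 2 * m * b_in   = 4
		c = b_in^2 - r^2   = 4 - 100 = -96
		discriminant = b^2 - 4ac = 16 + 768 = 784 > 0,
	so the path crosses the boundary circle twice, at x = ( -4 +/- sqrt(784) ) / ( 2 * a ), i.e. x = 6 and x = -8
	(with y = x + 2 giving the points (6,8) and (-8,-6), both of which lie on x^2 + y^2 = 100).
*/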
float a = 0, b = 0, c = 0;
float x_intercept_1, x_intercept_2, y_intercept_1, y_intercept_2, squared_distance_1, squared_distance_2;
float x_temp, y_temp;
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
float rotation_angle_radians = gantry_angle[i] * ANGLE_TO_RADIANS;
traversed_recon_volume[i] = false;
if( i < num_histories )
{
/***************************************************************************************************************/
/**************************************** Check entry information **********************************************/
/***************************************************************************************************************/
// Determine if the proton path enters the reconstruction volume. The proton path is defined using the entry angle and
// position where the proton intersected the entry SSD which is closest to the object. If this line projected onto the
// xy plane intersects the reconstruction cylinder, it will cross the circle bounding the cylinder in the xy plane
// twice and its entry elevation will lie within the height of the cylinder.
// Relevant angles in radians: gantry angle, proton path entry angle in ut and xy planes.
float ut_entry_angle = atan2f( t_in_2[i] - t_in_1[i], u_in_2[i] - u_in_1[i] );
xy_entry_angle[i] = ut_entry_angle + rotation_angle_radians;
if( xy_entry_angle[i] < 0 )
xy_entry_angle[i] += TWO_PI;
// Rotate entry detector positions
float x_in = ( cosf( rotation_angle_radians ) * u_in_2[i] ) - ( sinf( rotation_angle_radians ) * t_in_2[i] );
float y_in = ( sinf( rotation_angle_radians ) * u_in_2[i] ) + ( cosf( rotation_angle_radians ) * t_in_2[i] );
// Determine if entry points should be rotated
bool entry_in_cone =
( (xy_entry_angle[i] > PI_OVER_4) && (xy_entry_angle[i] < THREE_PI_OVER_4) )
||
( (xy_entry_angle[i] > FIVE_PI_OVER_4) && (xy_entry_angle[i] < SEVEN_PI_OVER_4) );
// Rotate x_in & y_in by 90 degrees, if necessary
if( entry_in_cone )
{
x_temp = x_in;
y_temp = y_in;
x_in = -y_temp;
y_in = x_temp;
xy_entry_angle[i] += PI_OVER_2;
}
float m_in = tanf( xy_entry_angle[i] ); // proton entry path slope
float b_in = y_in - m_in * x_in; // proton entry path y-intercept
// Quadratic formula coefficients
a = 1 + pow(m_in, 2); // x^2 coefficient
b = 2 * m_in * b_in; // x coefficient
c = pow(b_in, 2) - pow(RECON_CYL_RADIUS, 2 ); // constant term
float entry_discriminant = pow(b, 2) - (4 * a * c); // Quadratic formula discriminant
bool entered = ( entry_discriminant > 0 ); // Proton path intersected twice
// Find both intersection points of the circle; closest one to the entry SSDs is the entry position
// Notice that x_intercept_2 = ( -b - sqrt(...) ) / ( 2 * a ) has the negative sign pulled out and following calculations modified as necessary
// e.g. x_intercept_2 = -x_real_2
// y_intercept_2 = -y_real_2
// squared_distance_2 = sqd_real_2 since (x_intercept_2 + x_in)^2 = (-x_intercept_2 - x_in)^2 = (x_real_2 - x_in)^2 (same for y term)
// This negation is also considered when assigning x_entry/y_entry using -x_intercept_2/y_intercept_2 *(TRUE/FALSE = 1/0)
if( entered )
{
x_intercept_1 = ( sqrtf(entry_discriminant) - b ) / ( 2 * a );
x_intercept_2 = ( sqrtf(entry_discriminant) + b ) / ( 2 * a );
y_intercept_1 = m_in * x_intercept_1 + b_in;
y_intercept_2 = m_in * x_intercept_2 - b_in;
squared_distance_1 = pow(x_intercept_1 - x_in, 2) + pow(y_intercept_1 - y_in, 2);
squared_distance_2 = pow(x_intercept_2 + x_in, 2) + pow(y_intercept_2 + y_in, 2);
x_entry[i] = x_intercept_1 * (squared_distance_1 <= squared_distance_2) - x_intercept_2 * (squared_distance_1 > squared_distance_2);
y_entry[i] = y_intercept_1 * (squared_distance_1 <= squared_distance_2) - y_intercept_2 * (squared_distance_1 > squared_distance_2);
}
// Unrotate by 90 degrees, if necessary
if( entry_in_cone )
{
x_temp = x_entry[i];
y_temp = y_entry[i];
x_entry[i] = y_temp;
y_entry[i] = -x_temp;
xy_entry_angle[i] -= PI_OVER_2;
}
/***************************************************************************************************************/
/****************************************** Check exit information *********************************************/
/***************************************************************************************************************/
// Repeat the procedure above, this time to determine if the proton path exited the reconstruction volume and if so, the
// x,y,z position where it exited
float ut_exit_angle = atan2f( t_out_2[i] - t_out_1[i], u_out_2[i] - u_out_1[i] );
xy_exit_angle[i] = ut_exit_angle + rotation_angle_radians;
if( xy_exit_angle[i] < 0 )
xy_exit_angle[i] += TWO_PI;
// Rotate exit detector positions
float x_out = ( cosf(rotation_angle_radians) * u_out_1[i] ) - ( sinf(rotation_angle_radians) * t_out_1[i] );
float y_out = ( sinf(rotation_angle_radians) * u_out_1[i] ) + ( cosf(rotation_angle_radians) * t_out_1[i] );
// Determine if exit points should be rotated
bool exit_in_cone =
( (xy_exit_angle[i] > PI_OVER_4) && (xy_exit_angle[i] < THREE_PI_OVER_4) )
||
( (xy_exit_angle[i] > FIVE_PI_OVER_4) && (xy_exit_angle[i] < SEVEN_PI_OVER_4) );
// Rotate x_out & y_out by 90 degrees, if necessary
if( exit_in_cone )
{
x_temp = x_out;
y_temp = y_out;
x_out = -y_temp;
y_out = x_temp;
xy_exit_angle[i] += PI_OVER_2;
}
float m_out = tanf( xy_exit_angle[i] ); // proton exit path slope
float b_out = y_out - m_out * x_out; // proton exit path y-intercept
// Quadratic formula coefficients
a = 1 + pow(m_out, 2); // x^2 coefficient
b = 2 * m_out * b_out; // x coefficient
c = pow(b_out, 2) - pow(RECON_CYL_RADIUS, 2); // constant term
float exit_discriminant = pow(b, 2) - (4 * a * c); // Quadratic formula discriminant
bool exited = ( exit_discriminant > 0 ); // Proton path intersected twice
// Find both intersection points of the circle; closest one to the exit SSDs is the exit position
if( exited )
{
x_intercept_1 = ( sqrtf(exit_discriminant) - b ) / ( 2 * a );
x_intercept_2 = ( sqrtf(exit_discriminant) + b ) / ( 2 * a );// -x calculated
y_intercept_1 = m_out * x_intercept_1 + b_out;
y_intercept_2 = m_out * x_intercept_2 - b_out;// -y calculated
squared_distance_1 = pow(x_intercept_1 - x_out, 2) + pow(y_intercept_1 - y_out, 2);
squared_distance_2 = pow(x_intercept_2 + x_out, 2) + pow(y_intercept_2 + y_out, 2);// modified due to -x and -y calcs above
x_exit[i] = x_intercept_1 * (squared_distance_1 <= squared_distance_2) - x_intercept_2 * (squared_distance_1 > squared_distance_2);
y_exit[i] = y_intercept_1 * (squared_distance_1 <= squared_distance_2) - y_intercept_2 * (squared_distance_1 > squared_distance_2);
}
// Unrotate by 90 degrees, if necessary
if( exit_in_cone )
{
x_temp = x_exit[i];
y_temp = y_exit[i];
x_exit[i] = y_temp;
y_exit[i] = -x_temp;
xy_exit_angle[i] -= PI_OVER_2;
}
/***************************************************************************************************************/
/***************************************** Check z(v) direction ************************************************/
/***************************************************************************************************************/
// Relevant angles/slopes in radians for entry and exit in the uv plane
float uv_entry_slope = ( v_in_2[i] - v_in_1[i] ) / ( u_in_2[i] - u_in_1[i] );
float uv_exit_slope = ( v_out_2[i] - v_out_1[i] ) / ( u_out_2[i] - u_out_1[i] );
float uv_entry_angle = atan2( v_in_2[i] - v_in_1[i], u_in_2[i] - u_in_1[i] );
float uv_exit_angle = atan2( v_out_2[i] - v_out_1[i], u_out_2[i] - u_out_1[i] );
xz_entry_angle[i] = uv_entry_angle;
xz_exit_angle[i] = uv_exit_angle;
if( xz_entry_angle[i] < 0 )
xz_entry_angle[i] += TWO_PI;
if( xz_exit_angle[i] < 0 )
xz_exit_angle[i] += TWO_PI;
// Calculate the u coordinate for the entry and exit points of the reconstruction volume and then use the uv slope calculated
// from the detector entry and exit positions to determine the z position of the proton as it entered and exited the
// reconstruction volume
/*
u-coordinate of the entry and exit points of the reconstruction cylinder can be found using an inverse rotation
u = cos( gantry_angle ) * x + sin( gantry_angle ) * y
*/
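/*
	Quick check of the inverse rotation above with illustrative numbers: for a gantry angle of 90 degrees,
	u = cos(90)*x + sin(90)*y = y, so a point at (x, y) = (0, 5) has u = 5; the detector-system u axis is
	aligned with the object-system y axis at that gantry position.
*/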
float u_entry = ( cosf( rotation_angle_radians ) * x_entry[i] ) + ( sinf( rotation_angle_radians ) * y_entry[i] );
float u_exit = ( cosf(rotation_angle_radians) * x_exit[i] ) + ( sinf(rotation_angle_radians) * y_exit[i] );
z_entry[i] = v_in_2[i] + uv_entry_slope * ( u_entry - u_in_2[i] );
z_exit[i] = v_out_1[i] - uv_exit_slope * ( u_out_1[i] - u_exit );
// Even if the proton path intersected the circle describing the boundary of the cylinder twice, it may not have actually
// passed through the reconstruction volume or may have only passed through part way. If |z_entry|> RECON_CYL_HEIGHT/2 ,
// then something went wrong, since the source is around z = 0, and we do not want to use this history. If the
// |z_entry| < RECON_CYL_HEIGHT/2 and |z_exit| > RECON_CYL_HEIGHT/2 then we want to use the history but the x_exit and
// y_exit positions need to be calculated again based on how far through the cylinder the proton passed before exiting it
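/*
	Illustrative numbers for the partial-traversal correction below (RECON_CYL_HEIGHT value chosen only for
	illustration): with RECON_CYL_HEIGHT = 20, z_entry = 2, and z_exit = 14, the proton leaves through the top
	of the cylinder ( |z_exit| > 10 ), so
		recon_cyl_fraction = | (10 - 2) / (14 - 2) | = 2/3
	and the exit point is pulled back to two thirds of the way along the entry-to-exit segment, with z_exit
	reset to +10.
*/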
if( entered && exited )
{
if( ( fabs(z_entry[i]) <= RECON_CYL_HEIGHT * 0.5 ) && ( fabs(z_exit[i]) > RECON_CYL_HEIGHT * 0.5 ) )
{
float recon_cyl_fraction = fabs( ( ( (z_exit[i] >= 0) - (z_exit[i] < 0) ) * RECON_CYL_HEIGHT * 0.5 - z_entry[i] ) / ( z_exit[i] - z_entry[i] ) );
x_exit[i] = x_entry[i] + recon_cyl_fraction * ( x_exit[i] - x_entry[i] );
y_exit[i] = y_entry[i] + recon_cyl_fraction * ( y_exit[i] - y_entry[i] );
z_exit[i] = ( (z_exit[i] >= 0) - (z_exit[i] < 0) ) * RECON_CYL_HEIGHT * 0.5;
}
else if( fabs(z_entry[i]) > RECON_CYL_HEIGHT * 0.5 )
{
entered = false;
exited = false;
}
// Check the measurement locations. Do not allow more than 5 cm difference in entry and exit in t and v. This gets
// rid of spurious events.
if( ( fabs(t_out_1[i] - t_in_2[i]) > 5 ) || ( fabs(v_out_1[i] - v_in_2[i]) > 5 ) )
{
entered = false;
exited = false;
}
}
relative_ut_angle[i] = ut_exit_angle - ut_entry_angle;
relative_uv_angle[i] = uv_exit_angle - uv_entry_angle;
// Proton passed through the reconstruction volume only if it both entered and exited the reconstruction cylinder
traversed_recon_volume[i] = entered && exited;
}
}
void bin_valid_histories( const int num_histories )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
unsigned int mem_size_hist_bool = sizeof(bool) * num_histories;
traversed_recon_volume_h = (bool*) calloc( num_histories, sizeof(bool) );
bin_num_h = (int*) calloc( num_histories, sizeof(int) );
x_entry_h = (float*) calloc( num_histories, sizeof(float) );
y_entry_h = (float*) calloc( num_histories, sizeof(float) );
z_entry_h = (float*) calloc( num_histories, sizeof(float) );
x_exit_h = (float*) calloc( num_histories, sizeof(float) );
y_exit_h = (float*) calloc( num_histories, sizeof(float) );
z_exit_h = (float*) calloc( num_histories, sizeof(float) );
xy_entry_angle_h = (float*) calloc( num_histories, sizeof(float) );
xz_entry_angle_h = (float*) calloc( num_histories, sizeof(float) );
xy_exit_angle_h = (float*) calloc( num_histories, sizeof(float) );
xz_exit_angle_h = (float*) calloc( num_histories, sizeof(float) );
relative_ut_angle_h = (float*) calloc( num_histories, sizeof(float) );
relative_uv_angle_h = (float*) calloc( num_histories, sizeof(float) );
cudaMalloc((void**) &bin_num_d, mem_size_hist_ints );
cudaMemcpy( bin_num_d, bin_num_h, mem_size_hist_ints, cudaMemcpyHostToDevice );
dim3 dimBlock( THREADS_PER_BLOCK );
dim3 dimGrid( (int)( num_histories/THREADS_PER_BLOCK ) + 1 );
bin_valid_histories_GPU<<<dimGrid, dimBlock>>>
(
num_histories, bin_counts_d, bin_num_d, traversed_recon_volume_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d,
mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d, WEPL_d,
xy_entry_angle_d, xz_entry_angle_d, xy_exit_angle_d, xz_exit_angle_d,
relative_ut_angle_d, relative_uv_angle_d
);
cudaMemcpy( traversed_recon_volume_h, traversed_recon_volume_d, mem_size_hist_bool, cudaMemcpyDeviceToHost );
cudaMemcpy( bin_num_h, bin_num_d, mem_size_hist_ints, cudaMemcpyDeviceToHost );
cudaMemcpy( x_entry_h, x_entry_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( y_entry_h, y_entry_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( z_entry_h, z_entry_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( x_exit_h, x_exit_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( y_exit_h, y_exit_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( z_exit_h, z_exit_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( xy_entry_angle_h, xy_entry_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( xz_entry_angle_h, xz_entry_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( xy_exit_angle_h, xy_exit_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( xz_exit_angle_h, xz_exit_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( relative_ut_angle_h, relative_ut_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
cudaMemcpy( relative_uv_angle_h, relative_uv_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost );
char data_filename[128];
if( WRITE_BIN_WEPLS )
{
sprintf(data_filename, "%s_%03d%s", "bin_numbers", gantry_angle_h[0], ".txt" );
write_array_to_disk( data_filename, output_directory, output_folder, bin_num_h, COLUMNS, ROWS, SLICES, num_histories, true );
}
int offset = 0;
for( int i = 0; i < num_histories; i++ )
{
if( traversed_recon_volume_h[i] && ( bin_num_h[i] >= 0 ) )
{
bin_num_vector.push_back( bin_num_h[i] );
//gantry_angle_vector.push_back( gantry_angle_h[i] );
WEPL_vector.push_back( WEPL_h[i] );
x_entry_vector.push_back( x_entry_h[i] );
y_entry_vector.push_back( y_entry_h[i] );
z_entry_vector.push_back( z_entry_h[i] );
x_exit_vector.push_back( x_exit_h[i] );
y_exit_vector.push_back( y_exit_h[i] );
z_exit_vector.push_back( z_exit_h[i] );
xy_entry_angle_vector.push_back( xy_entry_angle_h[i] );
xz_entry_angle_vector.push_back( xz_entry_angle_h[i] );
//xy_exit_angle_vector.push_back( xy_exit_angle_h[i] );
//xz_exit_angle_vector.push_back( xz_exit_angle_h[i] );
relative_ut_angle_vector.push_back( relative_ut_angle_h[i] );
relative_uv_angle_vector.push_back( relative_uv_angle_h[i] );
offset++;
recon_vol_histories++;
}
}
printf( "%d out of %d histories passed intersection cuts this iteration\n", offset, num_histories );
free( traversed_recon_volume_h );
free( bin_num_h );
free( x_entry_h );
free( y_entry_h );
free( z_entry_h );
free( x_exit_h );
free( y_exit_h );
free( z_exit_h );
free( xy_entry_angle_h );
free( xz_entry_angle_h );
free( xy_exit_angle_h );
free( xz_exit_angle_h );
free( relative_ut_angle_h );
free( relative_uv_angle_h );
//cudaFree( bin_num_d );
cudaFree( xy_entry_angle_d );
cudaFree( xz_entry_angle_d );
cudaFree( xy_exit_angle_d );
cudaFree( xz_exit_angle_d );
cudaFree( relative_ut_angle_d );
cudaFree( relative_uv_angle_d );
}
__global__ void bin_valid_histories_GPU
(
int num_histories, int* bin_counts, int* bin_num, bool* traversed_recon_volume,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit,
float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle, float* WEPL,
float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle,
float* relative_ut_angle, float* relative_uv_angle
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( i < num_histories )
{
float x_midpath, y_midpath, z_midpath, path_angle;
int angle_bin, t_bin, v_bin;
float angle, t, v;
x_midpath = ( x_entry[i] + x_exit[i] ) / 2;
y_midpath = ( y_entry[i] + y_exit[i] ) / 2;
z_midpath = ( z_entry[i] + z_exit[i] ) / 2;
path_angle = atan2( ( y_exit[i] - y_entry[i] ) , ( x_exit[i] - x_entry[i] ) );
if( path_angle < 0 )
path_angle += 2*PI;
angle_bin = int( ( path_angle * RADIANS_TO_ANGLE / ANGULAR_BIN_SIZE ) + 0.5) % ANGULAR_BINS;
angle = angle_bin * ANGULAR_BIN_SIZE * ANGLE_TO_RADIANS;
t = y_midpath * cosf(angle) - x_midpath * sinf(angle);
t_bin = int( (t / T_BIN_SIZE ) + T_BINS/2);
v = z_midpath;
v_bin = int( (v / V_BIN_SIZE ) + V_BINS/2);
if( traversed_recon_volume[i] )
{
if( (t_bin >= 0) && (v_bin >= 0) && (t_bin < T_BINS) && (v_bin < V_BINS) )
{
bin_num[i] = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS;
atomicAdd( &bin_counts[bin_num[i]], 1 );
atomicAdd( &mean_WEPL[bin_num[i]], WEPL[i] );
atomicAdd( &mean_rel_ut_angle[bin_num[i]], relative_ut_angle[i] );
atomicAdd( &mean_rel_uv_angle[bin_num[i]], relative_uv_angle[i] );
}
else
bin_num[i] = -1;
}
}
}
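/*
	Bin indexing used above, with hypothetical sizes for illustration: bins are flattened as
		bin_num = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS,
	so with T_BINS = 100 and ANGULAR_BINS = 90, a history falling in t_bin = 12, angle_bin = 3, v_bin = 2
	lands in bin 12 + 300 + 18000 = 18312. The same flattening is assumed by calculate_means_GPU and
	calculate_standard_deviations_GPU below.
*/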
/************************************************************************************************************************************************************/
/*************************************************************** Statistical Analysis and Cuts **************************************************************/
/************************************************************************************************************************************************************/
void calculate_means()
{
puts("Calculating the Mean for Each Bin Before Cuts...");
dim3 dimBlock( T_BINS );
dim3 dimGrid( V_BINS, ANGULAR_BINS );
calculate_means_GPU<<< dimGrid, dimBlock >>>
(
bin_counts_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d
);
//cudaMemcpy( bin_counts_h, bin_counts_d, MEM_SIZE_BINS_INTS, cudaMemcpyDeviceToHost );
//cudaMemcpy( mean_WEPL_h, mean_WEPL_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost );
//cudaMemcpy( mean_rel_ut_angle_h, mean_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost );
//cudaMemcpy( mean_rel_uv_angle_h, mean_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost );
//write_array_to_disk("bin_counts_h_pre", output_directory, output_folder, bin_counts_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
//write_array_to_disk("mean_WEPL_h", output_directory, output_folder, mean_WEPL_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
//write_array_to_disk("mean_rel_ut_angle_h", output_directory, output_folder, mean_rel_ut_angle_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
//write_array_to_disk("mean_rel_uv_angle_h", output_directory, output_folder, mean_rel_uv_angle_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
free(bin_counts_h);
free(mean_WEPL_h);
free(mean_rel_ut_angle_h);
free(mean_rel_uv_angle_h);
}
__global__ void calculate_means_GPU( int* bin_counts, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle )
{
int v = blockIdx.x;
int angle = blockIdx.y;
int t = threadIdx.x;
int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS;
if( bin_counts[bin] > 0 )
{
mean_WEPL[bin] /= bin_counts[bin];
mean_rel_ut_angle[bin] /= bin_counts[bin];
mean_rel_uv_angle[bin] /= bin_counts[bin];
}
}
void sum_squared_deviations( const int start_position, const int num_histories )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
cudaMalloc((void**) &bin_num_d, mem_size_hist_ints);
cudaMalloc((void**) &WEPL_d, mem_size_hist_floats);
cudaMalloc((void**) &xy_entry_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &xz_entry_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats);
//cudaMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats);
//cudaMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &relative_ut_angle_d, mem_size_hist_floats);
cudaMalloc((void**) &relative_uv_angle_d, mem_size_hist_floats);
cudaMemcpy( bin_num_d, &bin_num_vector[start_position], mem_size_hist_ints, cudaMemcpyHostToDevice);
cudaMemcpy( WEPL_d, &WEPL_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice);
cudaMemcpy( xy_entry_angle_d, &xy_entry_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice);
cudaMemcpy( xz_entry_angle_d, &xz_entry_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice);
//cudaMemcpy( xy_exit_angle_d, &xy_exit_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice);
//cudaMemcpy( xz_exit_angle_d, &xz_exit_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice);
cudaMemcpy( relative_ut_angle_d, &relative_ut_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice);
cudaMemcpy( relative_uv_angle_d, &relative_uv_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice);
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
sum_squared_deviations_GPU<<<dimGrid, dimBlock>>>
(
num_histories, bin_num_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d,
WEPL_d, xy_entry_angle_d, xz_entry_angle_d, xy_entry_angle_d, xz_entry_angle_d,//xy_exit_angle_d, xz_exit_angle_d,
stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d, relative_ut_angle_d, relative_uv_angle_d
);
cudaFree( bin_num_d );
cudaFree( WEPL_d );
cudaFree( xy_entry_angle_d );
cudaFree( xz_entry_angle_d );
//cudaFree( xy_exit_angle_d );
//cudaFree( xz_exit_angle_d );
cudaFree( relative_ut_angle_d );
cudaFree( relative_uv_angle_d );
}
__global__ void sum_squared_deviations_GPU
(
int num_histories, int* bin_num, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle,
float* WEPL, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle,
float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle, float* relative_ut_angle, float* relative_uv_angle
)
{
float WEPL_difference, rel_ut_angle_difference, rel_uv_angle_difference;
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( i < num_histories )
{
/* float ut_diff = xy_exit_angle[i] - xy_entry_angle[i];
if( fabs(ut_diff) > PI )
{
printf("Hello\n");
if( xy_entry_angle[i] > PI )
xy_entry_angle[i] -= TWO_PI;
if( xy_exit_angle[i] > PI )
xy_exit_angle[i] -= TWO_PI;
ut_diff = xy_exit_angle[i] - xy_entry_angle[i];
}
float uv_diff = xz_exit_angle[i] - xz_entry_angle[i];
if( fabs(uv_diff) > PI )
{
if( xz_entry_angle[i] > PI )
xz_entry_angle[i] -= TWO_PI;
if( xz_exit_angle[i] > PI )
xz_exit_angle[i] -= TWO_PI;
uv_diff = xz_exit_angle[i] - xz_entry_angle[i];
}*/
WEPL_difference = WEPL[i] - mean_WEPL[bin_num[i]];
rel_ut_angle_difference = relative_ut_angle[i] - mean_rel_ut_angle[bin_num[i]];
rel_uv_angle_difference = relative_uv_angle[i] - mean_rel_uv_angle[bin_num[i]];
//rel_ut_angle_difference = ut_diff - mean_rel_ut_angle[bin_num[i]];
//rel_uv_angle_difference = uv_diff - mean_rel_uv_angle[bin_num[i]];
atomicAdd( &stddev_WEPL[bin_num[i]], WEPL_difference * WEPL_difference);
atomicAdd( &stddev_rel_ut_angle[bin_num[i]], rel_ut_angle_difference * rel_ut_angle_difference );
atomicAdd( &stddev_rel_uv_angle[bin_num[i]], rel_uv_angle_difference * rel_uv_angle_difference );
}
}
void calculate_standard_deviations()
{
puts("Calculating standard deviations for each bin...");
dim3 dimBlock( T_BINS );
dim3 dimGrid( V_BINS, ANGULAR_BINS );
calculate_standard_deviations_GPU<<< dimGrid, dimBlock >>>
(
bin_counts_d, stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d
);
//cudaFree( bin_counts_d );
}
__global__ void calculate_standard_deviations_GPU( int* bin_counts, float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle )
{
int v = blockIdx.x, angle = blockIdx.y, t = threadIdx.x;
int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS;
if( bin_counts[bin] > 0 )
{
// SAMPLE_STD_DEV = true/false = 1/0 => std_dev = sqrt( SUM{i = 1 -> N} [ ( x_i - mu )^2 ] / ( N - SAMPLE_STD_DEV ) )
stddev_WEPL[bin] = sqrtf( stddev_WEPL[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
stddev_rel_ut_angle[bin] = sqrtf( stddev_rel_ut_angle[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
stddev_rel_uv_angle[bin] = sqrtf( stddev_rel_uv_angle[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
}
__syncthreads();
bin_counts[bin] = 0;
}
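/*
	Note on the divisor above: with SAMPLE_STD_DEV = 1 the kernel applies Bessel's correction and divides the
	summed squared deviations by ( N - 1 ); with SAMPLE_STD_DEV = 0 it computes the population standard
	deviation and divides by N. For example, a bin holding the three WEPL deviations { -1, 0, 1 } (illustrative
	values) gives sqrt( 2 / 2 ) = 1.0 in the sample case and sqrt( 2 / 3 ) ~= 0.816 in the population case.
*/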
void statistical_cuts( const int start_position, const int num_histories )
{
unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
unsigned int mem_size_hist_bools = sizeof(bool) * num_histories;
passed_cuts_h = (bool*) calloc (num_histories, sizeof(bool) );
cudaMalloc( (void**) &bin_num_d, mem_size_hist_ints );
cudaMalloc( (void**) &WEPL_d, mem_size_hist_floats );
cudaMalloc( (void**) &xy_entry_angle_d, mem_size_hist_floats );
cudaMalloc( (void**) &xz_entry_angle_d, mem_size_hist_floats );
//cudaMalloc( (void**) &xy_exit_angle_d, mem_size_hist_floats );
//cudaMalloc( (void**) &xz_exit_angle_d, mem_size_hist_floats );
cudaMalloc( (void**) &relative_ut_angle_d, mem_size_hist_floats );
cudaMalloc( (void**) &relative_uv_angle_d, mem_size_hist_floats );
cudaMalloc( (void**) &passed_cuts_d, mem_size_hist_bools );
cudaMemcpy( bin_num_d, &bin_num_vector[start_position], mem_size_hist_ints, cudaMemcpyHostToDevice );
cudaMemcpy( WEPL_d, &WEPL_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
cudaMemcpy( xy_entry_angle_d, &xy_entry_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
cudaMemcpy( xz_entry_angle_d, &xz_entry_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
//cudaMemcpy( xy_exit_angle_d, &xy_exit_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
//cudaMemcpy( xz_exit_angle_d, &xz_exit_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
cudaMemcpy( relative_ut_angle_d, &relative_ut_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
cudaMemcpy( relative_uv_angle_d, &relative_uv_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
cudaMemcpy( passed_cuts_d, passed_cuts_h, mem_size_hist_bools, cudaMemcpyHostToDevice );
//puts("Before kernel");
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid( int( num_histories / THREADS_PER_BLOCK ) + 1 );
statistical_cuts_GPU<<< dimGrid, dimBlock >>>
(
num_histories, bin_counts_d, bin_num_d, sinogram_d, WEPL_d,
xy_entry_angle_d, xz_entry_angle_d, xy_entry_angle_d, xz_entry_angle_d,//xy_exit_angle_d, xz_exit_angle_d,
mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d,
stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d,
passed_cuts_d, relative_ut_angle_d, relative_uv_angle_d
);
//puts("After kernel");
cudaMemcpy( passed_cuts_h, passed_cuts_d, mem_size_hist_bools, cudaMemcpyDeviceToHost);
//printf("start iteration %d\n", iteration );
for( int i = 0; i < num_histories; i++ )
{
if( passed_cuts_h[i] )
{
//printf("start i = %d\n", i );
//printf("index = %d\n", start_position + i );
bin_num_vector[post_cut_histories] = bin_num_vector[start_position + i];
//gantry_angle_vector[post_cut_histories] = gantry_angle_vector[start_position + i];
WEPL_vector[post_cut_histories] = WEPL_vector[start_position + i];
x_entry_vector[post_cut_histories] = x_entry_vector[start_position + i];
y_entry_vector[post_cut_histories] = y_entry_vector[start_position + i];
z_entry_vector[post_cut_histories] = z_entry_vector[start_position + i];
x_exit_vector[post_cut_histories] = x_exit_vector[start_position + i];
y_exit_vector[post_cut_histories] = y_exit_vector[start_position + i];
z_exit_vector[post_cut_histories] = z_exit_vector[start_position + i];
xy_entry_angle_vector[post_cut_histories] = xy_entry_angle_vector[start_position + i];
xz_entry_angle_vector[post_cut_histories] = xz_entry_angle_vector[start_position + i];
//xy_exit_angle_vector[post_cut_histories] = xy_exit_angle_vector[start_position + i];
//xz_exit_angle_vector[post_cut_histories] = xz_exit_angle_vector[start_position + i];
relative_ut_angle_vector[post_cut_histories] = relative_ut_angle_vector[start_position + i];
relative_uv_angle_vector[post_cut_histories] = relative_uv_angle_vector[start_position + i];
post_cut_histories++;
}
}
//printf("end iteration %d\n", iteration );
}
__global__ void statistical_cuts_GPU
(
int num_histories, int* bin_counts, int* bin_num, float* sinogram, float* WEPL,
float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle,
float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle,
float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle,
bool* passed_cuts, float* relative_ut_angle, float* relative_uv_angle
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( i < num_histories )
{
/*float ut_diff = xy_exit_angle[i] - xy_entry_angle[i];
if( ut_diff > PI )
{
if( xy_entry_angle[i] > PI )
xy_entry_angle[i] -= TWO_PI;
if( xy_exit_angle[i] > PI )
xy_exit_angle[i] -= TWO_PI;
ut_diff = xy_exit_angle[i] - xy_entry_angle[i];
}
float uv_diff = xz_exit_angle[i] - xz_entry_angle[i];
if( uv_diff > PI )
{
if( xz_entry_angle[i] > PI )
xz_entry_angle[i] -= TWO_PI;
if( xz_exit_angle[i] > PI )
xz_exit_angle[i] -= TWO_PI;
uv_diff = xz_exit_angle[i] - xz_entry_angle[i];
}*/
bool passed_ut_cut = ( fabs( relative_ut_angle[i] - mean_rel_ut_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_ut_angle[bin_num[i]] ) );
bool passed_uv_cut = ( fabs( relative_uv_angle[i] - mean_rel_uv_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_uv_angle[bin_num[i]] ) );
/*bool passed_ut_cut = ( fabs( ut_diff - mean_rel_ut_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_ut_angle[bin_num[i]] ) );
bool passed_uv_cut = ( fabs( uv_diff - mean_rel_uv_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_uv_angle[bin_num[i]] ) );*/
bool passed_WEPL_cut = ( fabs( mean_WEPL[bin_num[i]] - WEPL[i] ) <= ( SIGMAS_TO_KEEP * stddev_WEPL[bin_num[i]] ) );
passed_cuts[i] = passed_ut_cut && passed_uv_cut && passed_WEPL_cut;
if( passed_cuts[i] )
{
atomicAdd( &sinogram[bin_num[i]], WEPL[i] );
atomicAdd( &bin_counts[bin_num[i]], 1 );
}
}
}
/************************************************************************************************************************************************************/
/*********************************************************************** MLP ********************************************************************************/
/************************************************************************************************************************************************************/
void create_MLP_test_image()
{
double x, y;
// Create MLP test image, initialized to zeros
MLP_test_image_h = (int*)calloc( MLP_IMAGE_VOXELS, sizeof(int));
for( int slice = 0; slice < MLP_IMAGE_SLICES; slice++ )
{
for( int row = 0; row < MLP_IMAGE_ROWS; row++ )
{
for( int column = 0; column < MLP_IMAGE_COLUMNS; column++ )
{
x = ( column - MLP_IMAGE_COLUMNS/2 + 0.5) * MLP_IMAGE_VOXEL_WIDTH;
y = ( MLP_IMAGE_ROWS/2 - row - 0.5 ) * MLP_IMAGE_VOXEL_HEIGHT;
if( pow( x, 2 ) + pow( y, 2 ) <= pow( double(MLP_IMAGE_RECON_CYL_RADIUS), 2) )
MLP_test_image_h[slice * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS + row * MLP_IMAGE_COLUMNS + column] = 1;
if( pow( x / MLP_PHANTOM_A, 2 ) + pow( y / MLP_PHANTOM_B, 2 ) <= 1 )
MLP_test_image_h[slice * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS + row * MLP_IMAGE_COLUMNS + column] = 8;
}
}
}
}
//void MLP_entry_exit
//(
// int*& image, bool entry,
// float x_start, float y_start, float z_start,
// float xy_angle, float xz_angle,
// float x_object, float y_object, float z_object
//)
//{
// /********************************************************************************************/
// /********************************* Voxel Walk Parameters ************************************/
// /********************************************************************************************/
// int x_move_direction, y_move_direction, z_move_direction;
// int x_voxel_step, y_voxel_step, z_voxel_step;
// float delta_x, delta_y, delta_z;
// float x_move, y_move, z_move;
// /********************************************************************************************/
// /**************************** Status Tracking Information ***********************************/
// /********************************************************************************************/
// float x, y, z;
// float x_inside, y_inside, z_inside;
// float x_to_go, y_to_go, z_to_go;
// float x_extension, y_extension;
// float voxel_x, voxel_y, voxel_z;
// float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out;
// int voxel;
// bool outside_image, end_walk;
// /********************************************************************************************/
// /************************** Initial and Boundary Conditions *********************************/
// /********************************************************************************************/
// // Initial Distance Into Voxel
// x_inside = modf( ( x_start + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
// y_inside = modf( ( RECON_CYL_RADIUS - y_entry ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
// z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
//
// voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
// voxel_x_out = int( ( x_exit + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
// voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit ) /VOXEL_HEIGHT );
// voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit ) /VOXEL_THICKNESS );
// voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS);
// /********************************************************************************************/
// /***************************** Path and Walk Information ************************************/
// /********************************************************************************************/
// // Lengths/Distances as x is Incremented One Voxel
// delta_x = VOXEL_WIDTH;
// delta_y = abs( (y_exit - y_entry)/(x_exit - x_start) * VOXEL_WIDTH );
// delta_z = abs( (z_exit - z_entry)/(x_exit - x_start) * VOXEL_WIDTH );
// // Overwrite NaN if Divisors on delta_i Calculations Above
// if( x_start == x_exit )
// {
// delta_x = abs( (x_exit - x_entry)/(y_exit - y_entry) * VOXEL_HEIGHT );
// delta_y = VOXEL_HEIGHT;
// delta_z = abs( (z_exit - z_entry)/(y_exit - y_entry) * VOXEL_HEIGHT );
// if( y_entry == y_exit )
// {
// delta_x = abs( (x_exit - x_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );
// delta_y = abs( (y_exit - y_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );;
// delta_z = VOXEL_THICKNESS;
// }
// }
// x_move = 0, y_move = 0, z_move = 0;
// /*x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit );
// y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit );
// z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit );*/
// x_move_direction = ( cosf(xy_angle) >= 0 ) - ( cosf(xy_angle) < 0 );
// y_move_direction = ( sinf(xy_angle) >= 0 ) - ( sinf(xy_angle) < 0 );
// z_move_direction = ( sinf(xz_angle) >= 0 ) - ( sinf(xz_angle) < 0 );
// x_voxel_step = x_move_direction;
// y_voxel_step = -y_move_direction;
// z_voxel_step = -z_move_direction;
// /********************************************************************************************/
// /**************************** Status Tracking Information ***********************************/
// /********************************************************************************************/
// x = x_entry, y = y_entry, z = z_entry;
// x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside;
// y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside;
// z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside;
//
// outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
// if( !outside_image )
// image[voxel] = 0;
// end_walk = ( voxel == voxel_out ) || outside_image;
// //fgets(user_response, sizeof(user_response), stdin);
// /********************************************************************************************/
// /*********************************** Voxel Walk Routine *************************************/
// /********************************************************************************************/
// if( z_entry != z_exit )
// {
// while( !end_walk )
// {
// // Change in z for Move to Voxel Edge in x and y
// x_extension = delta_z/delta_x * x_to_go;
// y_extension = delta_z/delta_y * y_to_go;
// if( z_to_go <= x_extension && z_to_go <= y_extension )
// {
// //printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
// x_move = delta_x / delta_z * z_to_go;
// y_move = delta_y / delta_z * z_to_go;
// z_move = z_to_go;
// x_to_go -= x_move;
// y_to_go -= y_move;
// z_to_go = VOXEL_THICKNESS;
// voxel_z += z_voxel_step;
// if( x_to_go == 0 )
// {
// voxel_x += x_voxel_step;
// x_to_go = VOXEL_WIDTH;
// }
// if( y_to_go == 0 )
// {
// voxel_y += y_voxel_step;
// y_to_go = VOXEL_HEIGHT;
// }
// }
// //If Next Voxel Edge is in x or xy Diagonal
// else if( x_extension <= y_extension )
// {
// //printf(" x_extension <= y_extension \n");
// x_move = x_to_go;
// y_move = delta_y / delta_x * x_to_go;
// z_move = delta_z / delta_x * x_to_go;
// x_to_go = VOXEL_WIDTH;
// y_to_go -= y_move;
// z_to_go -= z_move;
// voxel_x += x_voxel_step;
// if( y_to_go == 0 )
// {
// y_to_go = VOXEL_HEIGHT;
// voxel_y += y_voxel_step;
// }
// }
// // Else Next Voxel Edge is in y
// else
// {
// //printf(" y_extension < x_extension \n");
// x_move = delta_x / delta_y * y_to_go;
// y_move = y_to_go;
// z_move = delta_z / delta_y * y_to_go;
// x_to_go -= x_move;
// y_to_go = VOXEL_HEIGHT;
// z_to_go -= z_move;
// voxel_y += y_voxel_step;
// }
// x += x_move_direction * x_move;
// y += y_move_direction * y_move;
// z += z_move_direction * z_move;
// //fgets(user_response, sizeof(user_response), stdin);
// voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
// outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
// if( !outside_image )
// image[voxel] = 0;
// end_walk = ( voxel == voxel_out ) || outside_image;
// }
// }
// else
// {
// //printf("z_exit == z_entry\n");
// while( !end_walk )
// {
// // Change in x for Move to Voxel Edge in y
// y_extension = delta_x/delta_y * y_to_go;
// //If Next Voxel Edge is in x or xy Diagonal
// if( x_to_go <= y_extension )
// {
// //printf(" x_to_go <= y_extension \n");
// x_move = x_to_go;
// y_move = delta_y / delta_x * x_to_go;
// x_to_go = VOXEL_WIDTH;
// y_to_go -= y_move;
// voxel_x += x_voxel_step;
// if( y_to_go == 0 )
// {
// y_to_go = VOXEL_HEIGHT;
// voxel_y += y_voxel_step;
// }
// }
// // Else Next Voxel Edge is in y
// else
// {
// //printf(" y_extension < x_extension \n");
// x_move = delta_x / delta_y * y_to_go;
// y_move = y_to_go;
// x_to_go -= x_move;
// y_to_go = VOXEL_HEIGHT;
// voxel_y += y_voxel_step;
// }
// x += x_move_direction * x_move;
// y += y_move_direction * y_move;
// voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
// outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
// if( !outside_image )
// image[voxel] = 0;
// end_walk = ( voxel == voxel_out ) || outside_image;
// //fgets(user_response, sizeof(user_response), stdin);
// }// end: while( !end_walk )
// }//end: else: z_entry_h != z_exit_h => z_entry_h == z_exit_h
//}
void MLP_test()
{
char user_response[20];
float x_entry = -3.0;
float y_entry = -sqrtf( pow(MLP_IMAGE_RECON_CYL_RADIUS, 2) - pow(x_entry,2) );
float z_entry = 0.0;
float x_exit = 2.5;
float y_exit = sqrtf( pow(MLP_IMAGE_RECON_CYL_RADIUS, 2) - pow(x_exit,2) );
float z_exit = 0.0;
float xy_entry_angle = 25 * PI/180, xz_entry_angle = 0.0;
float xy_exit_angle = 45* PI/180, xz_exit_angle = 0.0;
float x_in_object, y_in_object, z_in_object;
float u_in_object, t_in_object, v_in_object;
float x_out_object, y_out_object, z_out_object;
float u_out_object, t_out_object, v_out_object;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
float voxel_x, voxel_y, voxel_z;
int voxel;
int x_move_direction, y_move_direction, z_move_direction;
int x_voxel_step, y_voxel_step, z_voxel_step;
float x, y, z;
float x_inside, y_inside, z_inside;
float x_to_go, y_to_go, z_to_go;
float delta_x, delta_y, delta_z;
float x_extension, y_extension;
float x_move, y_move, z_move;
bool end_walk, outside_image;
bool entered_object = false, exited_object = false;
/********************************************************************************************************/
/******************** Determine if and Where the Proton Enters the Actual Object ************************/
/********************************************************************************************************/
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_entry + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH;
y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_entry ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT;
z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_entry ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS;
//printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
//printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside);
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
//printf("voxel = %d \n", voxel );
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = MLP_IMAGE_VOXEL_WIDTH;
delta_y = tanf( xy_entry_angle ) * MLP_IMAGE_VOXEL_WIDTH;
delta_z = tanf( xz_entry_angle ) * MLP_IMAGE_VOXEL_WIDTH;
if( x_entry == x_exit )
{
delta_x = 0;
delta_y = MLP_IMAGE_VOXEL_HEIGHT;
delta_z = tanf(xz_entry_angle) / tanf(xy_entry_angle) * MLP_IMAGE_VOXEL_HEIGHT;
if( y_entry == y_exit )
{
delta_x = 0;
delta_y = 0;
delta_z = MLP_IMAGE_VOXEL_THICKNESS;
}
}
//printf("delta_x = %3f delta_y = %3f delta_z = %3f\n", delta_x, delta_y, delta_z );
x_move = 0, y_move = 0, z_move = 0;
/*x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit );
y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit );
z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit );*/
x_move_direction = ( cosf(xy_entry_angle) >= 0 ) - ( cosf(xy_entry_angle) < 0 );
y_move_direction = ( sinf(xy_entry_angle) >= 0 ) - ( sinf(xy_entry_angle) < 0 );
z_move_direction = ( sinf(xz_entry_angle) >= 0 ) - ( sinf(xz_entry_angle) < 0 );
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_entry, y = y_entry, z = z_entry;
x_to_go = ( x_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_WIDTH - x_inside ) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_HEIGHT - y_inside ) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_THICKNESS - z_inside ) + ( z_voxel_step <= 0 ) * z_inside;
//printf("initial values:\n\tx_to_go = %3f\n\ty_to_go = %3f\n\tz_to_go = %3f\n", x_to_go, y_to_go, z_to_go);
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
entered_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
end_walk = entered_object || outside_image;
///********************************************************************************************/
///*********************************** Voxel Walk Routine *************************************/
///********************************************************************************************/
if( z_entry != z_exit )
{
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = MLP_IMAGE_VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
entered_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
end_walk = entered_object || outside_image;
}
}
else
{
//printf("z_exit == z_entry\n");
while( !end_walk )
{
//printf("beginning of loop\n\n");
//printf("x = %3f y = %3f z = %3f\n", x, y, z );
//printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go);
//printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//printf("y_extension = %3f\n", y_extension);
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
//printf("end of loop\n\n");
//printf("x_move = %3f y_move = %3f\n", x_move, y_move );
//printf("x = %3f y = %3f z = %3f\n", x, y, z );
//printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go);
//printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n\n", voxel_x, voxel_y, voxel_z);
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
entered_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
//printf("MLP_IMAGE_WIDTH/2 = %3f\n MLP_IMAGE_HEIGHT/2 = %3f", MLP_IMAGE_WIDTH/2 , MLP_IMAGE_HEIGHT/2 );
x += x_move_direction * x_move;
y += y_move_direction * y_move;
end_walk = entered_object || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
}//end: else: z_entry != z_exit => z_entry == z_exit
if( entered_object )
{
x_in_object = x;
y_in_object = y;
z_in_object = z;
}
/********************************************************************************************************/
/******************** Determine if and Where the Proton Exited the Actual Object ************************/
/********************************************************************************************************/
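// Repeat the walk from the exit point, moving opposite to the exit direction (note the negated move directions below),
// to locate the point where the proton left the object; visited voxels are again marked with 4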
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_exit + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH;
y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_exit ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT;
z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_exit ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS;
//printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
//printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside);
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
//printf("voxel = %d \n", voxel );
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = MLP_IMAGE_VOXEL_WIDTH;
delta_y = tanf( xy_exit_angle ) * MLP_IMAGE_VOXEL_WIDTH;
delta_z = tanf( xz_exit_angle ) * MLP_IMAGE_VOXEL_WIDTH;
if( x_entry == x_exit )
{
delta_x = 0;
delta_y = MLP_IMAGE_VOXEL_HEIGHT;
delta_z = tanf(xz_exit_angle) / tanf(xy_exit_angle) * MLP_IMAGE_VOXEL_HEIGHT;
if( y_entry == y_exit )
{
delta_x = 0;
delta_y = 0;
delta_z = MLP_IMAGE_VOXEL_THICKNESS;
}
}
//printf("delta_x = %3f delta_y = %3f delta_z = %3f\n", delta_x, delta_y, delta_z );
x_move = 0, y_move = 0, z_move = 0;
//x_move_direction = ( x_exit <= x_entry ) - ( x_exit > x_entry );
//y_move_direction = ( y_exit <= y_entry ) - ( y_exit > y_entry );
//z_move_direction = ( z_exit <= z_entry ) - ( z_exit > z_entry );
x_move_direction = ( cosf(xy_exit_angle) < 0 ) - ( cosf(xy_exit_angle) >= 0 );
y_move_direction = ( sinf(xy_exit_angle) < 0 ) - ( sinf(xy_exit_angle) >= 0 );
z_move_direction = ( sinf(xz_exit_angle) < 0 ) - ( sinf(xz_exit_angle) >= 0 );
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_exit, y = y_exit, z = z_exit;
x_to_go = ( x_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_WIDTH - x_inside ) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_HEIGHT - y_inside ) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_THICKNESS - z_inside ) + ( z_voxel_step <= 0 ) * z_inside;
//printf("initial values:\n\tx_to_go = %3f\n\ty_to_go = %3f\n\tz_to_go = %3f\n", x_to_go, y_to_go, z_to_go);
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
exited_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
end_walk = exited_object || outside_image;
///********************************************************************************************/
///*********************************** Voxel Walk Routine *************************************/
///********************************************************************************************/
if( z_entry != z_exit )
{
//printf("z_entry != z_exit\n");
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = MLP_IMAGE_VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
voxel = int( voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS );
outside_image = ( voxel_x >= MLP_IMAGE_COLUMNS ) || ( voxel_y >= MLP_IMAGE_ROWS ) || ( voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
exited_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
end_walk = exited_object || outside_image;
}
}
else
{
//printf("z_entry == z_exit\n");
while( !end_walk )
{
//printf("beginning of loop\n\n");
//printf("x = %3f y = %3f z = %3f\n", x, y, z );
//printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go);
//printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//printf("y_extension = %3f\n", y_extension);
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = MLP_IMAGE_VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = MLP_IMAGE_VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
/*printf("end of loop\n\n");
printf("x_move = %3f y_move = %3f\n", x_move, y_move );
printf("x = %3f y = %3f z = %3f\n", x, y, z );
printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go);
printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n\n", voxel_x, voxel_y, voxel_z);*/
outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES );
if( !outside_image )
{
exited_object = MLP_test_image_h[voxel] == 8;
MLP_test_image_h[voxel] = 4;
}
//printf("MLP_IMAGE_WIDTH/2 = %3f\n MLP_IMAGE_HEIGHT/2 = %3f",MLP_IMAGE_WIDTH/2 , MLP_IMAGE_HEIGHT/2 );
x += x_move_direction * x_move;
y += y_move_direction * y_move;
end_walk = exited_object || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
}//end: else: z_entry != z_exit => z_entry == z_exit
if( exited_object )
{
x_out_object = x;
y_out_object = y;
z_out_object = z;
}
x_inside = modf( ( x_in_object + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH;
y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_in_object ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT;
z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_in_object ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS;
//printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z);
//printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside);
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
int path[1000];
int path_index = 0;
double chord_lengths[1000] = {0};
MLP_test_image_h[voxel] = 0;
path[path_index++] = voxel;
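// Rotate the object entry/exit points from the xyz image frame into the utv frame aligned with the entry direction:
// u is depth along the initial proton direction, t the lateral displacement, and v the vertical displacement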
u_in_object = ( cosf( xy_entry_angle ) * x_in_object ) + ( sinf( xy_entry_angle ) * y_in_object );
u_out_object = ( cosf( xy_entry_angle ) * x_out_object ) + ( sinf( xy_entry_angle ) * y_out_object );
t_in_object = ( cosf( xy_entry_angle ) * y_in_object ) - ( sinf( xy_entry_angle ) * x_in_object );
t_out_object = ( cosf( xy_entry_angle ) * y_out_object ) - ( sinf( xy_entry_angle ) * x_out_object );
v_in_object = z_in_object;
v_out_object = z_out_object;
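// MLP boundary conditions: each 2-vector pairs a displacement with an angle, T_0/V_0 at the object entry and
// T_2/V_2 at the object exit, for the lateral (t) and vertical (v) directions respectively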
double T_0[2] = { t_in_object, 0 };
double T_2[2] = { t_out_object, xy_exit_angle - xy_entry_angle };
double V_0[2] = { v_in_object, xz_entry_angle };
double V_2[2] = { v_out_object, xz_exit_angle };
double u_2 = abs(u_out_object - u_in_object);
double u_0 = 0, u_1 = MLP_u_step;
double t_1_previous, v_1_previous;
double x_1_previous = x, y_1_previous = y, z_1_previous = z;
int voxel_x_previous = voxel_x;
int voxel_y_previous = voxel_y;
int voxel_z_previous = voxel_z;
int voxel_previous = voxel;
int voxels_passed;
double chord_segment;
double chord_fraction;
double x_to_edge, y_to_edge, z_to_edge;
//fgets(user_response, sizeof(user_response), stdin);
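// Step along the proton path in increments of MLP_u_step, estimating the most likely lateral (t_1) and vertical (v_1)
// displacements at each depth u_1 between the object entry (u_0 = 0) and exit (u_2)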
while( u_1 <= u_2 - MLP_u_step )
{
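// R_0 and R_1 are the 2x2 propagation matrices from u_0 to u_1 and from u_1 to u_2 in (displacement, angle) phase space,
// stored row-major as {a, b, c, d}; R_0T and R_1T are their transposes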
double R_0[4] = { 1.0, u_1 - u_0, 0.0 , 1.0}; //a,b,c,d
double R_0T[4] = { 1.0, 0.0, u_1 - u_0 , 1.0}; //a,c,b,d
double R_1[4] = { 1.0, u_2 - u_1, 0.0 , 1.0}; //a,b,c,d
double R_1T[4] = { 1.0, 0.0, u_2 - u_1 , 1.0}; //a,c,b,d
double sigma_1_coefficient = pow( E_0 * ( 1 + 0.038 * log( (u_1 - u_0)/X_0) ), 2.0 ) / X_0;
float sigma_t1 = (A_0/3)*pow(u_1, 3.0) + (A_1/12)*pow(u_1, 4.0) + (A_2/30)*pow(u_1, 5.0) + (A_3/60)*pow(u_1, 6.0) + (A_4/105)*pow(u_1, 7.0) + (A_5/168)*pow(u_1, 8.0);
float sigma_t1_theta1 = pow(u_1, 2.0 )*( (A_0/2) + (A_1/6)*u_1 + (A_2/12)*pow(u_1, 2.0) + (A_3/20)*pow(u_1, 3.0) + (A_4/30)*pow(u_1, 4.0) + (A_5/42)*pow(u_1, 5.0) );
float sigma_theta1 = A_0*u_1 + (A_1/2)*pow(u_1, 2.0) + (A_2/3)*pow(u_1, 3.0) + (A_3/4)*pow(u_1, 4.0) + (A_4/5)*pow(u_1, 5.0) + (A_5/6)*pow(u_1, 6.0);
double determinant_Sigma_1 = sigma_t1 * sigma_theta1 - pow( sigma_t1_theta1, 2 );//ad-bc
double Sigma_1I[4] = // Sigma_1 Inverse = [1/det(Sigma_1)]*{ d, -b, -c, a }
{
sigma_theta1 / determinant_Sigma_1,
-sigma_t1_theta1 / determinant_Sigma_1,
-sigma_t1_theta1 / determinant_Sigma_1,
sigma_t1 / determinant_Sigma_1
};
double sigma_2_coefficient = pow( E_0 * ( 1 + 0.038 * log( (u_2 - u_1)/X_0 ) ), 2.0 ) / X_0;
double sigma_t2 = (A_0/3)*pow(u_2, 3.0) + (A_1/12)*pow(u_2, 4.0) + (A_2/30)*pow(u_2, 5.0) + (A_3/60)*pow(u_2, 6.0) + (A_4/105)*pow(u_2, 7.0) + (A_5/168)*pow(u_2, 8.0)
- (A_0/3)*pow(u_1, 3.0) - (A_1/4)*pow(u_1, 4.0) - (A_2/5)*pow(u_1, 5.0) - (A_3/6)*pow(u_1, 6.0) - (A_4/7)*pow(u_1, 7.0) - (A_5/8)*pow(u_1, 8.0)
+ 2*u_2*( (A_0/2)*pow(u_1, 2.0) + (A_1/3)*pow(u_1, 3.0) + (A_2/4)*pow(u_1, 4.0) + (A_3/5)*pow(u_1, 5.0) + (A_4/6)*pow(u_1, 6.0) + (A_5/7)*pow(u_1, 7.0) )
- pow(u_2, 2.0) * ( A_0*u_1 + (A_1/2)*pow(u_1, 2.0) + (A_2/3)*pow(u_1, 3.0) + (A_3/4)*pow(u_1, 4.0) + (A_4/5)*pow(u_1, 5.0) + (A_5/6)*pow(u_1, 6.0) );
double sigma_t2_theta2 = pow(u_2, 2.0 )*( (A_0/2) + (A_1/6)*u_2 + (A_2/12)*pow(u_2, 2.0) + (A_3/20)*pow(u_2, 3.0) + (A_4/30)*pow(u_2, 4.0) + (A_5/42)*pow(u_2, 5.0) )
- u_2*u_1*( A_0 + (A_1/2)*u_1 + (A_2/3)*pow(u_1, 2.0) + (A_3/4)*pow(u_1, 3.0) + (A_4/5)*pow(u_1, 4.0) + (A_5/6)*pow(u_1, 5.0) )
+ pow(u_1, 2.0 )*( (A_0/2) + (A_1/3)*u_1 + (A_2/4)*pow(u_1, 2.0) + (A_3/5)*pow(u_1, 3.0) + (A_4/6)*pow(u_1, 4.0) + (A_5/7)*pow(u_1, 5.0) );
double sigma_theta2 = A_0 * ( u_2 - u_1 ) + ( A_1 / 2 ) * ( pow(u_2, 2.0) - pow(u_1, 2.0) ) + ( A_2 / 3 ) * ( pow(u_2, 3.0) - pow(u_1, 3.0) )
+ ( A_3 / 4 ) * ( pow(u_2, 4.0) - pow(u_1, 4.0) ) + ( A_4 / 5 ) * ( pow(u_2, 5.0) - pow(u_1, 5.0) ) + ( A_5 /6 )*( pow(u_2, 6.0) - pow(u_1, 6.0) );
double determinant_Sigma_2 = sigma_t2 * sigma_theta2 - pow( sigma_t2_theta2, 2 );//ad-bc
double Sigma_2I[4] = // Sigma_2 Inverse = [1/det(Sigma_2)]*{ d, -b, -c, a }
{
sigma_theta2 / determinant_Sigma_2,
-sigma_t2_theta2 / determinant_Sigma_2,
-sigma_t2_theta2 / determinant_Sigma_2,
sigma_t2 / determinant_Sigma_2
};
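// MLP estimate: t_1 = [Sigma_1^-1 + R_1^T Sigma_2^-1 R_1]^-1 * [Sigma_1^-1 R_0 T_0 + R_1^T Sigma_2^-1 T_2];
// first_term holds the bracketed matrix (inverted in place below) and second_term the bracketed vector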
double first_term[4] =
{
Sigma_1I[0] + R_1T[0] * ( Sigma_2I[0] * R_1[0] + Sigma_2I[1] * R_1[2] ) + R_1T[1] * ( Sigma_2I[2] * R_1[0] + Sigma_2I[3] * R_1[2] ),
Sigma_1I[1] + R_1T[0] * ( Sigma_2I[0] * R_1[1] + Sigma_2I[1] * R_1[3] ) + R_1T[1] * ( Sigma_2I[2] * R_1[1] + Sigma_2I[3] * R_1[3] ),
Sigma_1I[2] + R_1T[2] * ( Sigma_2I[0] * R_1[0] + Sigma_2I[1] * R_1[2] ) + R_1T[3] * ( Sigma_2I[2] * R_1[0] + Sigma_2I[3] * R_1[2] ),
Sigma_1I[3] + R_1T[2] * ( Sigma_2I[0] * R_1[1] + Sigma_2I[1] * R_1[3] ) + R_1T[3] * ( Sigma_2I[2] * R_1[1] + Sigma_2I[3] * R_1[3] )
};
double determinant_first_term = first_term[0] * first_term[3] - first_term[1] * first_term[2];
// 2x2 inverse: {a,b,c,d} -> {d,-b,-c,a}/det; the original a is saved before it is overwritten
double first_term_0 = first_term[0];
first_term[0] = first_term[3] / determinant_first_term;
first_term[1] = -first_term[1] / determinant_first_term;
first_term[2] = -first_term[2] / determinant_first_term;
first_term[3] = first_term_0 / determinant_first_term;
double second_term[2] =
{
Sigma_1I[0] * ( R_0[0] * T_0[0] + R_0[1] * T_0[1] )
+ Sigma_1I[1] * ( R_0[2] * T_0[0] + R_0[3] * T_0[1] )
+ R_1T[0] * ( Sigma_2I[0] * T_2[0] + Sigma_2I[1] * T_2[1] )
+ R_1T[1] * ( Sigma_2I[2] * T_2[0] + Sigma_2I[3] * T_2[1] )
,
Sigma_1I[2] * ( R_0[0] * T_0[0] + R_0[1] * T_0[1] )
+ Sigma_1I[3] * ( R_0[2] * T_0[0] + R_0[3] * T_0[1] )
+ R_1T[2] * ( Sigma_2I[0] * T_2[0] + Sigma_2I[1] * T_2[1] )
+ R_1T[3] * ( Sigma_2I[2] * T_2[0] + Sigma_2I[3] * T_2[1] )
};
double t_1 = first_term[0] * second_term[0] + first_term[1] * second_term[1];
double theta_1 = first_term[2] * second_term[0] + first_term[3] * second_term[1];
// Do v MLP Now
second_term[0] = Sigma_1I[0] * ( R_0[0] * V_0[0] + R_0[1] * V_0[1] )
+ Sigma_1I[1] * ( R_0[2] * V_0[0] + R_0[3] * V_0[1] )
+ R_1T[0] * ( Sigma_2I[0] * V_2[0] + Sigma_2I[1] * V_2[1] )
+ R_1T[1] * ( Sigma_2I[2] * V_2[0] + Sigma_2I[3] * V_2[1] );
second_term[1] = Sigma_1I[2] * ( R_0[0] * V_0[0] + R_0[1] * V_0[1] )
+ Sigma_1I[3] * ( R_0[2] * V_0[0] + R_0[3] * V_0[1] )
+ R_1T[2] * ( Sigma_2I[0] * V_2[0] + Sigma_2I[1] * V_2[1] )
+ R_1T[3] * ( Sigma_2I[2] * V_2[0] + Sigma_2I[3] * V_2[1] );
double v_1 = first_term[0] * second_term[0] + first_term[1] * second_term[1];
double phi_1 = first_term[2] * second_term[0] + first_term[3] * second_term[1];
// Rotate Coordinate From utv to xyz Coordinate System and Determine Which Voxel this Point on the MLP Path is in
double x_1 = ( cosf( xy_entry_angle ) * (u_in_object + u_1) ) - ( sinf( xy_entry_angle ) * t_1 );
double y_1 = ( sinf( xy_entry_angle ) * (u_in_object + u_1) ) + ( cosf( xy_entry_angle ) * t_1 );
double z_1 = v_in_object + v_1;
x_inside = modf( ( x_1 + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH;
y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_1 ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT;
z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_1 ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS;
x_voxel_step = (voxel_x >= voxel_x_previous ) - (voxel_x <= voxel_x_previous );
y_voxel_step = (voxel_y >= voxel_y_previous ) - (voxel_y <= voxel_y_previous );
z_voxel_step = (voxel_z >= voxel_z_previous ) - (voxel_z <= voxel_z_previous );
x_to_edge = (x_voxel_step < 0) * x_inside + (x_voxel_step > 0) * (MLP_IMAGE_VOXEL_WIDTH - x_inside);
y_to_edge = (y_voxel_step < 0) * y_inside + (y_voxel_step > 0) * (MLP_IMAGE_VOXEL_HEIGHT - y_inside);
z_to_edge = (z_voxel_step < 0) * z_inside + (z_voxel_step > 0) * (MLP_IMAGE_VOXEL_THICKNESS - z_inside);
voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS);
if( voxel != path[path_index - 1] )
path[path_index++] = voxel;
for( int i = 0; i < path_index; i++ )
printf( "path[i] = %d\n", path[i] );
printf( "path_index = %d\n\n", path_index );
fgets(user_response, sizeof(user_response), stdin);
MLP_test_image_h[voxel] = 0;
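// Accumulate the chord length of the MLP path through each voxel: if this step stayed inside the previous voxel the whole
// segment belongs to it; if exactly one voxel boundary was crossed, the segment is split at that boundary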
voxels_passed = int( fabs(voxel_x - voxel_x_previous) + fabs(voxel_y - voxel_y_previous) + fabs(voxel_z - voxel_z_previous) );
chord_segment = sqrt( pow( x_1_previous - x_1, 2 ) + pow( y_1_previous - y_1, 2 ) + pow( z_1_previous - z_1, 2 ) );
if( voxels_passed == 0 )
{
chord_lengths[path_index - 1] += chord_segment;
}
else if( voxels_passed == 1 )
{
if( x_voxel_step != 0 )
{
chord_fraction = x_to_edge / (x_1_previous - x_1);
}
else if( y_voxel_step != 0 )
{
chord_fraction = y_to_edge / (y_1_previous - y_1);
}
else
{
chord_fraction = z_to_edge / (z_1_previous - z_1);
}
chord_lengths[path_index - 1] += chord_fraction * chord_segment;
chord_lengths[path_index] += chord_segment - chord_lengths[path_index - 1];
}
else if( voxels_passed == 2 )
{
}
else if( voxels_passed == 3 )
{
}
u_1 += MLP_u_step;
t_1_previous = t_1;
v_1_previous = v_1;
x_1_previous = x_1;
y_1_previous = y_1;
z_1_previous = z_1;
voxel_x_previous = voxel_x;
voxel_y_previous = voxel_y;
voxel_z_previous = voxel_z;
voxel_previous = voxel;
}
}
/************************************************************************************************************************************************************/
/************************************************************************ FBP *******************************************************************************/
/************************************************************************************************************************************************************/
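// FBP pipeline: construct_sinogram() averages the WEPL in each (t, v, angle) bin, filter() convolves each t-row of the
// sinogram with the selected ramp filter, and backprojection() accumulates the filtered values into the image X_h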
void initialize_sinogram()
{
puts("Allocating host/GPU memory and initializing sinogram...");
sinogram_h = (float*) calloc( NUM_BINS, sizeof(float) );
cudaMalloc((void**) &sinogram_d, MEM_SIZE_BINS_FLOATS );
cudaMemcpy( sinogram_d, sinogram_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice );
}
void construct_sinogram()
{
puts("Recalculating the mean WEPL for each bin and constructing the sinogram...");
dim3 dimBlock( T_BINS );
dim3 dimGrid( V_BINS, ANGULAR_BINS );
construct_sinogram_GPU<<< dimGrid, dimBlock >>>( bin_counts_d, sinogram_d );
//cudaMemcpy(sinogram_h, sinogram_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost);
//write_array_to_disk("sinogram", output_directory, output_folder, sinogram_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, false );
//bin_counts_h = (int*) calloc( NUM_BINS, sizeof(int) );
//cudaMemcpy(bin_counts_h, bin_counts_d, MEM_SIZE_BINS_INTS, cudaMemcpyDeviceToHost) ;
//write_array_to_disk( "bin_counts_post", output_directory, output_folder, bin_counts_h, T_BINS, ANGULAR_BINS, V_BINS, NUM_BINS, true );
}
__global__ void construct_sinogram_GPU( int* bin_counts, float* sinogram )
{
int v = blockIdx.x, angle = blockIdx.y, t = threadIdx.x;
int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS;
if( bin_counts[bin] > 0 )
sinogram[bin] /= bin_counts[bin];
}
void filter()
{
puts("Doing the filtering...");
sinogram_filtered_h = (float*) calloc( NUM_BINS, sizeof(float) );
cudaMalloc((void**) &sinogram_filtered_d, MEM_SIZE_BINS_FLOATS);
cudaMemcpy( sinogram_filtered_d, sinogram_filtered_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice);
dim3 dimBlock( T_BINS );
dim3 dimGrid( V_BINS, ANGULAR_BINS );
filter_GPU<<< dimGrid, dimBlock >>>( sinogram_d, sinogram_filtered_d );
cudaMemcpy(sinogram_filtered_h, sinogram_filtered_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost) ;
free(sinogram_h);
cudaFree(sinogram_d);
cudaFree(sinogram_filtered_d);
}
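// One thread per (t, v, angle) bin: accumulates the contributions of every t bin in the same strip, weighting each
// sinogram value by the discrete filter kernel and the cosine scale factor defined below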
__global__ void filter_GPU( float* sinogram, float* sinogram_filtered )
{
int t_bin_ref,angle_bin,t_bin,v_bin,t_bin_sep;
float filtered,t,v,scale_factor;
v_bin = blockIdx.x;
angle_bin = blockIdx.y;
t_bin = threadIdx.x;
v = ( v_bin - V_BINS/2 ) * V_BIN_SIZE + V_BIN_SIZE/2.0;
// Loop over all t bins in this strip, accumulating their filtered contributions to this output bin
for( t_bin_ref = 0; t_bin_ref < T_BINS; t_bin_ref++ )
{
t = ( t_bin_ref - T_BINS/2 ) * T_BIN_SIZE + T_BIN_SIZE/2.0;
t_bin_sep = t_bin - t_bin_ref;
// scale_factor = r . path = cos(theta_{r,path})
scale_factor = SOURCE_RADIUS / sqrtf( SOURCE_RADIUS * SOURCE_RADIUS + t * t + v * v );
switch( FILTER_NUM )
{
case 0: // Ram-Lak filter
if( t_bin_sep == 0 )
filtered = 1.0 / ( 8.0 * powf( T_BIN_SIZE, 2.0 ) );
else if( t_bin_sep % 2 == 0 )
filtered = 0;
else
filtered = -1.0 / ( 2.0 * powf( T_BIN_SIZE * PI * t_bin_sep, 2.0 ) );
break;
case 1: // Shepp-Logan filter
filtered = powf( powf(T_BIN_SIZE * PI, 2.0) * ( 1.0 - powf(2 * t_bin_sep, 2.0) ), -1.0 );
break;
}
int strip_index = ( v_bin * ANGULAR_BINS * T_BINS ) + ( angle_bin * T_BINS );
sinogram_filtered[strip_index + t_bin] += T_BIN_SIZE * sinogram[strip_index + t_bin_ref] * filtered * scale_factor;
}
}
void backprojection()
{
puts("Doing the backprojection...");
printf("DEBUG: MEM_SIZE_IMAGE_FLOAT = %u\n", MEM_SIZE_IMAGE_FLOAT);
// Allocate host memory
puts("DEBUG: Allocate host memory");
char user_response[20];
X_h = (float*) calloc( VOXELS, sizeof(float) );
if( X_h == NULL )
{
printf("ERROR: Memory not allocated for X_h!\n");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
// Check that we don't have any corruptions up until now
for( int i = 0; i < NUM_BINS; i++ )
if( sinogram_filtered_h[i] != sinogram_filtered_h[i] )
printf("We have a nan in bin #%d\n", i);
float delta = GANTRY_ANGLE_INTERVAL * ANGLE_TO_RADIANS;
// Loop over the voxels
for( int slice = 0; slice < SLICES; slice++ )
{
for( int column = 0; column < COLUMNS; column++ )
{
for( int row = 0; row < ROWS; row++ )
{
float x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH;
float y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT;
float z = -RECON_CYL_HEIGHT / 2.0 + (slice + 0.5) * SLICE_THICKNESS;
//// If the voxel is outside a cylinder contained in the reconstruction volume, set to air
if( ( x * x + y * y ) > ( RECON_CYL_RADIUS * RECON_CYL_RADIUS ) )
X_h[( slice * COLUMNS * ROWS) + ( row * COLUMNS ) + column] = 0.00113;
else
{
// Sum over projection angles
for( int angle_bin = 0; angle_bin < ANGULAR_BINS; angle_bin++ )
{
// Rotate the pixel position to the beam-detector co-ordinate system
float u = x * cosf( angle_bin * delta ) + y * sinf( angle_bin * delta );
float t = -x * sinf( angle_bin * delta ) + y * cosf( angle_bin * delta );
float v = z;
// Project to find the detector number
float detector_number_t = ( t - u *( t / ( SOURCE_RADIUS + u ) ) ) / T_BIN_SIZE + T_BINS/2.0;
int t_bin = int( detector_number_t);
if( t_bin > detector_number_t )
t_bin -= 1;
float eta = detector_number_t - t_bin;
// Now project v to get detector number in v axis
float detector_number_v = ( v - u * ( v / ( SOURCE_RADIUS + u ) ) ) / V_BIN_SIZE + V_BINS/2.0;
int v_bin = int( detector_number_v);
if( v_bin > detector_number_v )
v_bin -= 1;
float epsilon = detector_number_v - v_bin;
// Calculate the fan beam scaling factor
float scale_factor = powf( SOURCE_RADIUS / ( SOURCE_RADIUS + u ), 2 );
//bin_num[i] = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS;
// Compute the back-projection
int bin = t_bin + angle_bin * T_BINS + v_bin * ANGULAR_BINS * T_BINS;
int voxel = slice * COLUMNS * ROWS + row * COLUMNS + column;
// The offset to the next v bin is precomputed; the expression would not compile when written inline inside the subscripts
int index = ANGULAR_BINS * T_BINS;
//if( ( ( bin + ANGULAR_BINS * T_BINS + 1 ) >= NUM_BINS ) || ( bin < 0 ) );
if( v_bin == V_BINS - 1 || ( bin < 0 ) )
{
X_h[voxel] += delta * 2 *( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin]
+ eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]) * scale_factor;
}
//printf("The bin selected for this voxel does not exist!\n Slice: %d\n Column: %d\n Row: %d\n", slice, column, row);
else
{
// The precomputed offset is used here as well rather than writing the expression inline inside the subscripts
/*X_h[voxel] += delta * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin]
+ eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]
+ ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index]
+ eta * epsilon * sinogram_filtered_h[bin + index + 1] ) * scale_factor;*/
X_h[voxel] += delta * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin]
+ eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]
+ ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index]
+ eta * epsilon * sinogram_filtered_h[bin + index + 1] ) * scale_factor;
// Multiplying by the gantry angle interval for each gantry angle is equivalent to multiplying the final answer by 2*PI and is better numerically,
// so multiplying by delta each time should be replaced by X_h[voxel] *= 2 * PI after all contributions have been made, which is commented out below
/*X_h[voxel] += scale_factor * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin]
+ eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]
+ ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index]
+ eta * epsilon * sinogram_filtered_h[bin + index + 1] );*/
if(X_h[voxel]!=X_h[voxel])
printf("We have a nan in slice %d, column %d, and row %d\n", slice, column, row);
}
//X_h[voxel] *= 2 * PI;
}
}
}
}
}
free(sinogram_filtered_h);
FBP_object_h = (int*) calloc( COLUMNS * ROWS * SLICES, sizeof(int) );
for( int slice = 0; slice < SLICES; slice++ )
{
for( int row = 0; row < ROWS; row++ )
{
for( int column = 0; column < COLUMNS; column++ )
{
float x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH;
float y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT;
float d_squared = powf(x, 2) + powf(y, 2);
if(X_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] > FBP_THRESHOLD && (d_squared < powf(RECON_CYL_RADIUS, 2) ) )
FBP_object_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] = 1;
else
FBP_object_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] = 0;
}
}
}
//write_array_to_disk( "FBP_object", output_directory, output_folder, FBP_object_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk( "X_h", output_directory, output_folder, X_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk( "x_FBP", output_directory, output_folder, FBP_object_h, COLUMNS, ROWS, SLICES, VOXELS, true );
}
/************************************************************************************************************************************************************/
/****************************************************************** Image Initialization *******************************************************************/
/************************************************************************************************************************************************************/
void initialize_SC_hull( bool*& SC_hull_h, bool*& SC_hull_d )
{
/* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */
/* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */
/* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. Assign */
/* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */
// Allocate memory for the hull image on the host and initialize to zeros
SC_hull_h = (bool*)calloc( VOXELS, sizeof(bool));
float x, y;
// Set the inner cylinder of the hull image to 1s
for( int slice = 0; slice < SLICES; slice++ )
for( int row = 0; row < ROWS; row++ )
for( int column = 0; column < COLUMNS; column++ )
{
x = ( column - COLUMNS/2 + 0.5) * VOXEL_WIDTH;
y = ( ROWS/2 - row - 0.5) * VOXEL_HEIGHT;
if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) )
SC_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = true;
}
// Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU
cudaMalloc((void**) &SC_hull_d, MEM_SIZE_IMAGE_BOOL);
cudaMemcpy(SC_hull_d, SC_hull_h, MEM_SIZE_IMAGE_BOOL, cudaMemcpyHostToDevice) ;
}
void initialize_MSC_hull( int*& MSC_hull_h, int*& MSC_hull_d )
{
/* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */
/* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */
/* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. Assign */
/* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */
// Allocate memory for the hull image on the host and initialize to zeros
MSC_hull_h = (int*)calloc( VOXELS, sizeof(int));
float x, y;
// Set the inner cylinder of the hull image to 1s
for( int slice = 0; slice < SLICES; slice++ )
for( int row = 0; row < ROWS; row++ )
for( int column = 0; column < COLUMNS; column++ )
{
x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) )
MSC_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1;
}
// Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU
cudaMalloc((void**) &MSC_hull_d, MEM_SIZE_IMAGE_INT);
cudaMemcpy(MSC_hull_d, MSC_hull_h, MEM_SIZE_IMAGE_INT, cudaMemcpyHostToDevice) ;
}
void initialize_SM_hull( int*& SM_hull_h, int*& SM_hull_d )
{
/* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */
/* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */
/* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. Assign */
/* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */
// Allocate memory for the hull image on the host and initialize to zeros
SM_hull_h = (int*)calloc( VOXELS, sizeof(int));
float x, y;
// Set the inner cylinder of the hull image to 1s
for( int slice = 0; slice < SLICES; slice++ )
for( int row = 0; row < ROWS; row++ )
for( int column = 0; column < COLUMNS; column++ )
{
x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) )
SM_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1;
}
// Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU
cudaMalloc((void**) &SM_hull_d, MEM_SIZE_IMAGE_INT);
cudaMemcpy(SM_hull_d, SM_hull_h, MEM_SIZE_IMAGE_INT, cudaMemcpyHostToDevice) ;
}
void initialize_float_image( float*& float_image_h, float*& float_image_d )
{
//Create space carve object, init to zeros
float_image_h = (float*)calloc( VOXELS, sizeof(float));
double x, y;
// Set inner cylinder to 1s
for( int slice = 0; slice < SLICES; slice++ )
for( int row = 0; row < ROWS; row++ )
for( int column = 0; column < COLUMNS; column++ )
{
x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
if( ( (x * x) + (y * y) ) < double(RECON_CYL_RADIUS * RECON_CYL_RADIUS) )
float_image_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1;
}
cudaMalloc((void**) &float_image_d, MEM_SIZE_IMAGE_FLOAT);
cudaMemcpy(float_image_d, float_image_h, MEM_SIZE_IMAGE_FLOAT, cudaMemcpyHostToDevice) ;
}
/************************************************************************************************************************************************************/
/******************************************************************* Hull Detection *************************************************************************/
/************************************************************************************************************************************************************/
void hull_detection_initializations()
{
if( SC_ON || MSC_ON || SM_ON )
puts("Initializing hull-detection images...");
if( SC_ON )
initialize_SC_hull( SC_image_h, SC_image_d );
if( MSC_ON )
initialize_MSC_hull( MSC_image_h, MSC_image_d );
if( SM_ON )
initialize_SM_hull( SM_image_h, SM_image_d );
}
void hull_detection( int histories_to_process)
{
if( SC_ON && (!bad_data_angle( gantry_angle_h[0] ) || !RESTRICTED_ANGLES ) )
SC( histories_to_process );
if( MSC_ON )
MSC( histories_to_process );
if( SM_ON )
SM( histories_to_process );
}
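// Walks the straight line from (x_entry, y_entry, z_entry) to (x_exit, y_exit, z_exit) one voxel at a time, at each step
// crossing whichever voxel face (x, y, or z) is reached first, and carves every visited voxel out of the hull image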
__device__ void voxel_walk( bool*& image, float x_entry, float y_entry, float z_entry, float x_exit, float y_exit, float z_exit )
{
/********************************************************************************************/
/********************************* Voxel Walk Parameters ************************************/
/********************************************************************************************/
int x_move_direction, y_move_direction, z_move_direction;
int x_voxel_step, y_voxel_step, z_voxel_step;
float delta_x, delta_y, delta_z;
float x_move, y_move, z_move;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
float x, y, z;
float x_inside, y_inside, z_inside;
float x_to_go, y_to_go, z_to_go;
float x_extension, y_extension;
float voxel_x, voxel_y, voxel_z;
int voxel_x_out, voxel_y_out, voxel_z_out, voxel_out;
int voxel;
bool outside_image, end_walk;
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_entry + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
y_inside = modf( ( RECON_CYL_RADIUS - y_entry ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
voxel_x_out = int( ( x_exit + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit ) /VOXEL_HEIGHT );
voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit ) /VOXEL_THICKNESS );
voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS);
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = VOXEL_WIDTH;
delta_y = abs( (y_exit - y_entry)/(x_exit - x_entry) * VOXEL_WIDTH );
delta_z = abs( (z_exit - z_entry)/(x_exit - x_entry) * VOXEL_WIDTH );
// Overwrite the NaN values produced when the divisors in the delta_i calculations above are zero
if( x_entry == x_exit )
{
delta_x = abs( (x_exit - x_entry)/(y_exit - y_entry) * VOXEL_HEIGHT );
delta_y = VOXEL_HEIGHT;
delta_z = abs( (z_exit - z_entry)/(y_exit - y_entry) * VOXEL_HEIGHT );
if( y_entry == y_exit )
{
delta_x = abs( (x_exit - x_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );
delta_y = abs( (y_exit - y_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );
delta_z = VOXEL_THICKNESS;
}
}
x_move = 0, y_move = 0, z_move = 0;
x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit );
y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit );
z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit );
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_entry, y = y_entry, z = z_entry;
x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside;
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
image[voxel] = 0;
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
/********************************************************************************************/
/*********************************** Voxel Walk Routine *************************************/
/********************************************************************************************/
if( z_entry != z_exit )
{
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
//fgets(user_response, sizeof(user_response), stdin);
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
image[voxel] = 0;
end_walk = ( voxel == voxel_out ) || outside_image;
}
}
else
{
//printf("z_exit == z_entry\n");
while( !end_walk )
{
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
image[voxel] = 0;
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
}//end: else: z_entry_h != z_exit_h => z_entry_h == z_exit_h
}
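// SC (space carving): histories whose WEPL is at or below SC_THRESHOLD are treated as having passed only through air,
// so every voxel along their straight-line path is carved out of the hull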
void SC( int num_histories )
{
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
SC_GPU<<<dimGrid, dimBlock>>>
(
num_histories, SC_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d
);
}
__global__ void SC_GPU
(
int num_histories, bool* SC_image, int* bin_num, bool* traversed_recon_volume, float* WEPL,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= SC_THRESHOLD) && (bin_num[i] >= 0) )
{
voxel_walk( SC_image, x_entry[i], y_entry[i], z_entry[i], x_exit[i], y_exit[i], z_exit[i] );
}// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= PURE_SC_THRESH) && (bin_num[i] >= 0) )
}
/************************************************************************************************************************************************************/
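// MSC (modified space carving): instead of carving immediately, each qualifying history atomically increments a counter
// in every voxel it crosses; MSC_threshold() later removes voxels whose counts differ sharply from their neighbors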
void MSC( int num_histories )
{
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
MSC_GPU<<<dimGrid, dimBlock>>>
(
num_histories, MSC_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d
);
}
__global__ void MSC_GPU
(
int num_histories, int* MSC_image, int* bin_num, bool* traversed_recon_volume, float* WEPL,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] < MSC_THRESHOLD) && (bin_num[i] >= 0) )
{
//char user_response[20];
/********************************************************************************************/
/********************************* Voxel Walk Parameters ************************************/
/********************************************************************************************/
int x_move_direction, y_move_direction, z_move_direction;
int x_voxel_step, y_voxel_step, z_voxel_step;
float delta_x, delta_y, delta_z;
float x_move, y_move, z_move;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
float x, y, z;
float x_inside, y_inside, z_inside;
float x_to_go, y_to_go, z_to_go;
float x_extension, y_extension;
float voxel_x, voxel_y, voxel_z;
int voxel_x_out, voxel_y_out, voxel_z_out, voxel_out;
int voxel;
bool outside_image, end_walk;
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i]) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i]) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT );
voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS );
voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS);
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = VOXEL_WIDTH;
delta_y = abs( (y_exit[i] - y_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH );
delta_z = abs( (z_exit[i] - z_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH );
// Overwrite the NaN values produced when the divisors in the delta_i calculations above are zero
if( x_entry[i] == x_exit[i] )
{
delta_x = abs( (x_exit[i] - x_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT );
delta_y = VOXEL_HEIGHT;
delta_z = abs( (z_exit[i] - z_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT );
if( y_entry[i] == y_exit[i] )
{
delta_x = abs( (x_exit[i] - x_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );
delta_y = abs( (y_exit[i] - y_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );
delta_z = VOXEL_THICKNESS;
}
}
x_move = 0, y_move = 0, z_move = 0;
x_move_direction = ( x_entry[i] <= x_exit[i] ) - ( x_entry[i] > x_exit[i] );
y_move_direction = ( y_entry[i] <= y_exit[i] ) - ( y_entry[i] > y_exit[i] );
z_move_direction = ( z_entry[i] <= z_exit[i] ) - ( z_entry[i] > z_exit[i] );
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_entry[i], y = y_entry[i], z = z_entry[i];
x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside;
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &MSC_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
/********************************************************************************************/
/*********************************** Voxel Walk Routine *************************************/
/********************************************************************************************/
if( z_entry[i] != z_exit[i] )
{
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
//fgets(user_response, sizeof(user_response), stdin);
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &MSC_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
}
}
else
{
//printf("z_exit[i] == z_entry[i]\n");
while( !end_walk )
{
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &MSC_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
}//end: else: z_entry[i] != z_exit[i] => z_entry[i] == z_exit[i]
}// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= PURE_SC_THRESH) && (bin_num[i] >= 0) )
}
void MSC_threshold()
{
cudaMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost);
write_array_to_disk("MSC_image", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
dim3 dimBlock( SLICES );
dim3 dimGrid( COLUMNS, ROWS );
MSC_threshold_GPU<<< dimGrid, dimBlock >>>( MSC_image_d );
cudaMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost);
write_array_to_disk("MSC_image_thresholded", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk("x_MSC", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
cudaFree( MSC_image_d );
free(MSC_image_h);
}
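// One thread per voxel: compare the voxel's MSC count against its 8 in-slice neighbors and discard the voxel if the
// largest positive difference exceeds MSC_DIFF_THRESH; surviving nonzero voxels become 1 and the cylinder edge is zeroed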
__global__ void MSC_threshold_GPU( int* MSC_image )
{
int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
int difference, max_difference = 0;
if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) )
{
for( int current_row = row - 1; current_row <= row + 1; current_row++ )
{
for( int current_column = column - 1; current_column <= column + 1; current_column++ )
{
difference = MSC_image[voxel] - MSC_image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS];
if( difference > max_difference )
max_difference = difference;
}
}
}
__syncthreads();
if( max_difference > MSC_DIFF_THRESH )
MSC_image[voxel] = 0;
else if( MSC_image[voxel] == 0 )
MSC_image[voxel] = 0;
else
MSC_image[voxel] = 1;
if( powf(x, 2) + powf(y, 2) >= powf(RECON_CYL_RADIUS - max(VOXEL_WIDTH, VOXEL_HEIGHT)/2, 2 ) )
MSC_image[voxel] = 0;
}
/************************************************************************************************************************************************************/
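// SM (space modeling): histories with WEPL at or above SM_LOWER_THRESHOLD increment a counter in every voxel along
// their straight-line path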
void SM( int num_histories)
{
dim3 dimBlock(THREADS_PER_BLOCK);
dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1);
SM_GPU<<<dimGrid, dimBlock>>>
(
num_histories, SM_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d,
x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d
);
}
__global__ void SM_GPU
(
int num_histories, int* SM_image, int* bin_num, bool* traversed_recon_volume, float* WEPL,
float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit
)
{
int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] >= SM_LOWER_THRESHOLD) && (bin_num[i] >= 0) )
{
//char user_response[20];
/********************************************************************************************/
/********************************* Voxel Walk Parameters ************************************/
/********************************************************************************************/
int x_move_direction, y_move_direction, z_move_direction;
int x_voxel_step, y_voxel_step, z_voxel_step;
float delta_x, delta_y, delta_z;
float x_move, y_move, z_move;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
float x, y, z;
float x_inside, y_inside, z_inside;
float x_to_go, y_to_go, z_to_go;
float x_extension, y_extension;
float voxel_x, voxel_y, voxel_z;
float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out;
int voxel;
bool outside_image, end_walk;
/********************************************************************************************/
/************************** Initial and Boundary Conditions *********************************/
/********************************************************************************************/
// Initial Distance Into Voxel
x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i] ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i] ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT );
voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS );
voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS);
/********************************************************************************************/
/***************************** Path and Walk Information ************************************/
/********************************************************************************************/
// Lengths/Distances as x is Incremented One Voxel
delta_x = VOXEL_WIDTH;
delta_y = abs( (y_exit[i] - y_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH );
delta_z = abs( (z_exit[i] - z_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH );
	// Overwrite the NaNs produced above when a divisor in the delta_i calculations is zero
if( x_entry[i] == x_exit[i] )
{
delta_x = abs( (x_exit[i] - x_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT );
delta_y = VOXEL_HEIGHT;
delta_z = abs( (z_exit[i] - z_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT );
if( y_entry[i] == y_exit[i] )
{
delta_x = abs( (x_exit[i] - x_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );
	delta_y = abs( (y_exit[i] - y_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );
delta_z = VOXEL_THICKNESS;
}
}
x_move = 0, y_move = 0, z_move = 0;
x_move_direction = ( x_entry[i] <= x_exit[i] ) - ( x_entry[i] > x_exit[i] );
y_move_direction = ( y_entry[i] <= y_exit[i] ) - ( y_entry[i] > y_exit[i] );
z_move_direction = ( z_entry[i] <= z_exit[i] ) - ( z_entry[i] > z_exit[i] );
x_voxel_step = x_move_direction;
y_voxel_step = -y_move_direction;
z_voxel_step = -z_move_direction;
/********************************************************************************************/
/**************************** Status Tracking Information ***********************************/
/********************************************************************************************/
x = x_entry[i], y = y_entry[i], z = z_entry[i];
x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside;
y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside;
z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside;
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &SM_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
/********************************************************************************************/
/*********************************** Voxel Walk Routine *************************************/
/********************************************************************************************/
if( z_entry[i] != z_exit[i] )
{
while( !end_walk )
{
// Change in z for Move to Voxel Edge in x and y
x_extension = delta_z/delta_x * x_to_go;
y_extension = delta_z/delta_y * y_to_go;
if( z_to_go <= x_extension && z_to_go <= y_extension )
{
//printf("z_to_go <= x_extension && z_to_go <= y_extension\n");
x_move = delta_x / delta_z * z_to_go;
y_move = delta_y / delta_z * z_to_go;
z_move = z_to_go;
x_to_go -= x_move;
y_to_go -= y_move;
z_to_go = VOXEL_THICKNESS;
voxel_z += z_voxel_step;
if( x_to_go == 0 )
{
voxel_x += x_voxel_step;
x_to_go = VOXEL_WIDTH;
}
if( y_to_go == 0 )
{
voxel_y += y_voxel_step;
y_to_go = VOXEL_HEIGHT;
}
}
//If Next Voxel Edge is in x or xy Diagonal
else if( x_extension <= y_extension )
{
//printf(" x_extension <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
z_move = delta_z / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
z_to_go -= z_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
z_move = delta_z / delta_y * y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
z_to_go -= z_move;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
z += z_move_direction * z_move;
//fgets(user_response, sizeof(user_response), stdin);
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &SM_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
}
}
else
{
//printf("z_exit[i] == z_entry[i]\n");
while( !end_walk )
{
// Change in x for Move to Voxel Edge in y
y_extension = delta_x/delta_y * y_to_go;
//If Next Voxel Edge is in x or xy Diagonal
if( x_to_go <= y_extension )
{
//printf(" x_to_go <= y_extension \n");
x_move = x_to_go;
y_move = delta_y / delta_x * x_to_go;
x_to_go = VOXEL_WIDTH;
y_to_go -= y_move;
voxel_x += x_voxel_step;
if( y_to_go == 0 )
{
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
}
// Else Next Voxel Edge is in y
else
{
//printf(" y_extension < x_extension \n");
x_move = delta_x / delta_y * y_to_go;
y_move = y_to_go;
x_to_go -= x_move;
y_to_go = VOXEL_HEIGHT;
voxel_y += y_voxel_step;
}
x += x_move_direction * x_move;
y += y_move_direction * y_move;
voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES );
if( !outside_image )
atomicAdd( &SM_image[voxel], 1 );
end_walk = ( voxel == voxel_out ) || outside_image;
//fgets(user_response, sizeof(user_response), stdin);
}// end: while( !end_walk )
}//end: else: z_entry[i] != z_exit[i] => z_entry[i] == z_exit[i]
	}// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] >= SM_LOWER_THRESHOLD) && (bin_num[i] >= 0) )
}
void SM_threshold()
{
// Copy the space modeled image from the GPU to the CPU and write it to file.
cudaMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost);
write_array_to_disk("SM_image", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
int* SM_differences_h = (int*) calloc( VOXELS, sizeof(int) );
int* SM_differences_d;
cudaMalloc((void**) &SM_differences_d, MEM_SIZE_IMAGE_INT );
cudaMemcpy( SM_differences_d, SM_differences_h, MEM_SIZE_IMAGE_INT, cudaMemcpyHostToDevice );
dim3 dimBlock( SLICES );
dim3 dimGrid( COLUMNS, ROWS );
carve_differences<<< dimGrid, dimBlock >>>( SM_differences_d, SM_image_d );
cudaMemcpy( SM_differences_h, SM_differences_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost );
int* SM_thresholds_h = (int*) calloc( SLICES, sizeof(int) );
int voxel;
int max_difference = 0;
for( int slice = 0; slice < SLICES; slice++ )
{
for( int pixel = 0; pixel < COLUMNS * ROWS; pixel++ )
{
voxel = pixel + slice * COLUMNS * ROWS;
if( SM_differences_h[voxel] > max_difference )
{
max_difference = SM_differences_h[voxel];
SM_thresholds_h[slice] = SM_image_h[voxel];
}
}
printf( "Slice %d : The maximum space_model difference = %d and the space_model threshold = %d\n", slice, max_difference, SM_thresholds_h[slice] );
max_difference = 0;
}
int* SM_thresholds_d;
unsigned int threshold_size = SLICES * sizeof(int);
cudaMalloc((void**) &SM_thresholds_d, threshold_size );
cudaMemcpy( SM_thresholds_d, SM_thresholds_h, threshold_size, cudaMemcpyHostToDevice );
SM_threshold_GPU<<< dimGrid, dimBlock >>>( SM_image_d, SM_thresholds_d);
cudaMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost);
//write_array_to_disk("space_model_thresholded", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk("x_SM", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
cudaFree( SM_differences_d );
cudaFree( SM_thresholds_d );
cudaFree( SM_image_d );
free(SM_differences_h);
free(SM_thresholds_h);
free(SM_image_h);
}
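// Applies the per-slice threshold found on the host: voxels whose SM count exceeds
// SM_THRESHOLD_MULTIPLIER * SM_threshold[slice] become hull voxels (1), all others are cleared, as
// are voxels outside the reconstruction cylinder.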
__global__ void SM_threshold_GPU( int* SM_image, int* SM_threshold )
{
int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
if( voxel < VOXELS )
{
if( SM_image[voxel] > SM_THRESHOLD_MULTIPLIER * SM_threshold[slice] )
SM_image[voxel] = 1;
else
SM_image[voxel] = 0;
		if( powf(x, 2) + powf(y, 2) >= powf(RECON_CYL_RADIUS - max(VOXEL_WIDTH, VOXEL_HEIGHT)/2, 2 ) )
SM_image[voxel] = 0;
}
}
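// Alternate thresholding path; currently identical to SM_threshold() above, including the host-side
// per-slice threshold search and the launch of SM_threshold_GPU (not SM_threshold_GPU_2).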
void SM_threshold_2()
{
// Copy the space modeled image from the GPU to the CPU and write it to file.
cudaMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost);
write_array_to_disk("SM_image", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
int* SM_differences_h = (int*) calloc( VOXELS, sizeof(int) );
int* SM_differences_d;
cudaMalloc((void**) &SM_differences_d, MEM_SIZE_IMAGE_INT );
cudaMemcpy( SM_differences_d, SM_differences_h, MEM_SIZE_IMAGE_INT, cudaMemcpyHostToDevice );
dim3 dimBlock( SLICES );
dim3 dimGrid( COLUMNS, ROWS );
carve_differences<<< dimGrid, dimBlock >>>( SM_differences_d, SM_image_d );
cudaMemcpy( SM_differences_h, SM_differences_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost );
int* SM_thresholds_h = (int*) calloc( SLICES, sizeof(int) );
int voxel;
int max_difference = 0;
for( int slice = 0; slice < SLICES; slice++ )
{
for( int pixel = 0; pixel < COLUMNS * ROWS; pixel++ )
{
voxel = pixel + slice * COLUMNS * ROWS;
if( SM_differences_h[voxel] > max_difference )
{
max_difference = SM_differences_h[voxel];
SM_thresholds_h[slice] = SM_image_h[voxel];
}
}
printf( "Slice %d : The maximum space_model difference = %d and the space_model threshold = %d\n", slice, max_difference, SM_thresholds_h[slice] );
max_difference = 0;
}
int* SM_thresholds_d;
unsigned int threshold_size = SLICES * sizeof(int);
cudaMalloc((void**) &SM_thresholds_d, threshold_size );
cudaMemcpy( SM_thresholds_d, SM_thresholds_h, threshold_size, cudaMemcpyHostToDevice );
SM_threshold_GPU<<< dimGrid, dimBlock >>>( SM_image_d, SM_thresholds_d);
cudaMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost);
//write_array_to_disk("space_model_thresholded", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, false );
write_array_to_disk("x_SM", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
cudaFree( SM_differences_d );
cudaFree( SM_thresholds_d );
cudaFree( SM_image_d );
free(SM_differences_h);
free(SM_thresholds_h);
free(SM_image_h);
}
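// Device-side variant: each thread writes its voxel's maximum neighbor difference into
// SM_differences, every thread then scans its slice for the largest difference to pick the slice
// threshold, and finally the voxel is binarized exactly as in SM_threshold_GPU.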
__global__ void SM_threshold_GPU_2( int* SM_image, int* SM_differences )
{
int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
int difference, max_difference = 0;
if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) )
{
for( int current_row = row - 1; current_row <= row + 1; current_row++ )
{
for( int current_column = column - 1; current_column <= column + 1; current_column++ )
{
difference = SM_image[voxel] - SM_image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS];
if( difference > max_difference )
max_difference = difference;
}
}
SM_differences[voxel] = max_difference;
}
	__syncthreads();
	int slice_threshold = 0;
	max_difference = 0;
	for( int pixel = 0; pixel < COLUMNS * ROWS; pixel++ )
	{
		voxel = pixel + slice * COLUMNS * ROWS;
		if( SM_differences[voxel] > max_difference )
		{
			max_difference = SM_differences[voxel];
			slice_threshold = SM_image[voxel];
		}
	}
	__syncthreads();
	// Restore this thread's voxel index, which the slice-wide scan above overwrote
	voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
	float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
	float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
if( voxel < VOXELS )
{
if( SM_image[voxel] > SM_THRESHOLD_MULTIPLIER * slice_threshold )
SM_image[voxel] = 1;
else
SM_image[voxel] = 0;
		if( powf(x, 2) + powf(y, 2) >= powf(RECON_CYL_RADIUS - max(VOXEL_WIDTH, VOXEL_HEIGHT)/2, 2 ) )
SM_image[voxel] = 0;
}
}
void hull_detection_finish()
{
if( SC_ON || MSC_ON || SM_ON )
puts("Performing Hull Thresholding and Writing Hull Images to Disk...");
if( SC_ON )
{
cudaMemcpy(SC_image_h, SC_image_d, MEM_SIZE_IMAGE_BOOL, cudaMemcpyDeviceToHost);
write_array_to_disk("x_sc", output_directory, output_folder, SC_image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
}
if( MSC_ON )
MSC_threshold();
if( SM_ON )
SM_threshold();
if( SC_ON || MSC_ON || SM_ON )
puts("Hull-Detection Complete.");
}
/************************************************************************************************************************************************************/
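// For each interior voxel, records the largest positive difference between its value and its 8
// in-slice neighbors; the host uses these differences to choose the per-slice space-model threshold.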
__global__ void carve_differences( int* carve_differences, int* image )
{
int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) )
{
int difference, max_difference = 0;
for( int current_row = row - 1; current_row <= row + 1; current_row++ )
{
for( int current_column = column - 1; current_column <= column + 1; current_column++ )
{
difference = image[voxel] - image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS];
if( difference > max_difference )
max_difference = difference;
}
}
carve_differences[voxel] = max_difference;
}
}
void averaging_filter( bool*& image_h, bool*& image_d, const int filter_size )
{
initialize_SC_hull(image_h, image_d);
float threshold = 0;
dim3 dimBlock( SLICES );
dim3 dimGrid( COLUMNS, ROWS );
averaging_filter_GPU<<< dimGrid, dimBlock >>>( image_d, filter_size, threshold);
	cudaMemcpy(image_h, image_d, MEM_SIZE_IMAGE_BOOL, cudaMemcpyDeviceToHost);
write_array_to_disk( "test", output_directory, output_folder, image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
}
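// Sums the filter_size x filter_size in-slice neighborhood of each interior voxel and sets the voxel
// when the sum exceeds the threshold (with threshold = 0 this behaves as a binary dilation). The
// image is updated in place after the block-level sync.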
__global__ void averaging_filter_GPU( bool* image, const int filter_size, const float threshold )
{
int voxel_x = blockIdx.x;
int voxel_y = blockIdx.y;
int voxel_z = threadIdx.x;
int voxel = voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS;
int sum = image[voxel];
if( (voxel_x > 0) && (voxel_y > 0) && (voxel_x < COLUMNS - 1) && (voxel_y < ROWS - 1) )
{
for( int i = voxel_x - filter_size/2; i <= voxel_x + filter_size/2; i++ )
for( int j = voxel_y - filter_size/2; j <= voxel_y + filter_size/2; j++ )
sum += image[i + j * COLUMNS + voxel_z * COLUMNS * ROWS];
}
//value[voxel] = sum > threshold;
	__syncthreads();
image[voxel] = sum > threshold;
}
/************************************************************************************************************************************************************/
/******************************************************** Memory Transfers, Maintenance, and Cleaning *******************************************************/
/************************************************************************************************************************************************************/
void initial_processing_memory_clean()
{
free( gantry_angle_h );
cudaFree( x_entry_d );
cudaFree( y_entry_d );
cudaFree( z_entry_d );
cudaFree( x_exit_d );
cudaFree( y_exit_d );
cudaFree( z_exit_d );
cudaFree( traversed_recon_volume_d );
cudaFree( bin_num_d );
cudaFree( WEPL_d);
}
void post_cut_memory_clean()
{
puts("Freeing unnecessary memory, resizing vectors and shrinking vectors to just fit remaining histories...");
free(passed_cuts_h );
free(stddev_rel_ut_angle_h);
free(stddev_rel_uv_angle_h);
free(stddev_WEPL_h);
cudaFree( passed_cuts_d );
cudaFree( bin_num_d );
cudaFree( WEPL_d );
cudaFree( xy_entry_angle_d );
cudaFree( xz_entry_angle_d );
//cudaFree( xy_exit_angle_d );
//cudaFree( xz_exit_angle_d );
cudaFree( relative_ut_angle_d );
cudaFree( relative_uv_angle_d );
cudaFree( mean_rel_ut_angle_d );
cudaFree( mean_rel_uv_angle_d );
cudaFree( mean_WEPL_d );
cudaFree( stddev_rel_ut_angle_d );
cudaFree( stddev_rel_uv_angle_d );
cudaFree( stddev_WEPL_d );
}
void resize_vectors( const int new_size )
{
bin_num_vector.resize( new_size );
//gantry_angle_vector.resize( new_size );
WEPL_vector.resize( new_size );
x_entry_vector.resize( new_size );
y_entry_vector.resize( new_size );
z_entry_vector.resize( new_size );
x_exit_vector.resize( new_size );
y_exit_vector.resize( new_size );
z_exit_vector.resize( new_size );
xy_entry_angle_vector.resize( new_size );
xz_entry_angle_vector.resize( new_size );
//xy_exit_angle_vector.resize( new_size );
//xz_exit_angle_vector.resize( new_size );
relative_ut_angle_vector.resize( new_size );
relative_uv_angle_vector.resize( new_size );
}
void shrink_vectors( const int new_capacity )
{
bin_num_vector.shrink_to_fit();
//gantry_angle_vector.shrink_to_fit();
WEPL_vector.shrink_to_fit();
x_entry_vector.shrink_to_fit();
y_entry_vector.shrink_to_fit();
z_entry_vector.shrink_to_fit();
x_exit_vector.shrink_to_fit();
y_exit_vector.shrink_to_fit();
z_exit_vector.shrink_to_fit();
xy_entry_angle_vector.shrink_to_fit();
xz_entry_angle_vector.shrink_to_fit();
//xy_exit_angle_vector.shrink_to_fit();
//xz_exit_angle_vector.shrink_to_fit();
relative_ut_angle_vector.shrink_to_fit();
relative_uv_angle_vector.shrink_to_fit();
}
/************************************************************************************************************************************************************/
/****************************************************** Routines for Writing Data Arrays/Vectors to Disk ****************************************************/
/************************************************************************************************************************************************************/
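// Writes a linear array to ASCII text, either as a single file holding all z_max slices
// (single_file == true) or as one file per slice named <filename_base>_<slice>.txt; the vector
// version below behaves identically.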
template<typename T> void write_array_to_disk( char* filename_base, const char* directory, const char* folder, T* data, const int x_max, const int y_max, const int z_max, const int elements, const bool single_file )
{
char filename[256];
ofstream output_file;
int index;
int num_files = z_max;
int z_start = 0;
int z_end = 1;
if( single_file )
{
num_files = 1;
z_end = z_max;
}
for( int file = 0; file < num_files; file++)
{
if( num_files == z_max )
sprintf( filename, "%s%s/%s_%d.txt", directory, folder, filename_base, file );
else
sprintf( filename, "%s%s/%s.txt", directory, folder, filename_base );
output_file.open(filename);
for(int z = z_start; z < z_end; z++)
{
for(int y = 0; y < y_max; y++)
{
for(int x = 0; x < x_max; x++)
{
index = x + ( y * x_max ) + ( z * x_max * y_max );
if( index >= elements )
break;
output_file << data[index] << " ";
}
if( index >= elements )
break;
output_file << endl;
}
if( index >= elements )
break;
}
z_start += 1;
z_end += 1;
output_file.close();
}
}
template<typename T> void write_vector_to_disk( char* filename_base, const char* directory, const char* folder, vector<T> data, const int x_max, const int y_max, const int z_max, const bool single_file )
{
char filename[256];
ofstream output_file;
int elements = data.size();
int index;
int num_files = z_max;
int z_start = 0;
int z_end = 1;
if( single_file )
{
num_files = 1;
z_end = z_max;
}
for( int file = 0; file < num_files; file++)
{
if( num_files == z_max )
sprintf( filename, "%s%s/%s_%d.txt", directory, folder, filename_base, file );
else
sprintf( filename, "%s%s/%s.txt", directory, folder, filename_base );
output_file.open(filename);
for(int z = z_start; z < z_end; z++)
{
for(int y = 0; y < y_max; y++)
{
for(int x = 0; x < x_max; x++)
{
index = x + ( y * x_max ) + ( z * x_max * y_max );
if( index >= elements )
break;
output_file << data[index] << " ";
}
if( index >= elements )
break;
output_file << endl;
}
if( index >= elements )
break;
}
z_start += 1;
z_end += 1;
output_file.close();
}
}
/********************************************************************* Helper Functions *********************************************************************/
/************************************************************************************************************************************************************/
bool bad_data_angle( const int angle )
{
static const int bad_angles_array[] = {80, 84, 88, 92, 96, 100, 00, 180, 260, 264, 268, 272, 276};
vector<int> bad_angles(bad_angles_array, bad_angles_array + sizeof(bad_angles_array) / sizeof(bad_angles_array[0]) );
bool bad_angle = false;
for( int i = 0; i < bad_angles.size(); i++ )
if( angle == bad_angles[i] )
bad_angle = true;
return bad_angle;
}
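// Conversions from continuous (x, y, z) positions, with the image centered on the origin, to voxel
// column/row/slice indices: the column index grows with x while the row and slice indices grow as y
// and z decrease, matching the orientation used in the voxel walks above.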
int calculate_x_voxel(const float x_position, const int x_voxels, const float voxel_width )
{
// -10 100 1 [-50 49] -40
float x_width = x_voxels * voxel_width;//100
float x_range = x_width/2;//50
return ( x_position + x_range) / voxel_width;//-10+50/1 = 40
//[0 99]
}
int calculate_y_voxel(const float y_position, const int y_voxels, const float voxel_height )
{
// 10 100 1 [-50 49] 40
float y_width = y_voxels * voxel_height;//100
float y_range = y_width/2;//50
return ( y_range - y_position ) / voxel_height;
}
int calculate_slice(const float z_position, const int z_voxels, const float voxel_thickness )
{
// -10 100 1 [-50 49] -40
float z_width = z_voxels * voxel_thickness;//100
float z_range = z_width/2;//50
return ( z_range - z_position ) / voxel_thickness;
}
void early_exit_if( bool early_exit)
{
if( early_exit )
{
char user_response[20];
puts("Hit enter to stop...");
fgets(user_response, sizeof(user_response), stdin);
exit(1);
}
}
void start_execution_timing()
{
start_time = clock();
}
void stop_execution_timing()
{
end_time = clock();
execution_time = (end_time - start_time) / CLOCKS_PER_SEC;
printf( "Total execution time : %3f\n", double(execution_time) );
}
/************************************************************************************************************************************************************/
/****************************************************************** Testing Functions ***********************************************************************/
/************************************************************************************************************************************************************/
void test_func()
{
//char user_response[20];
//initialize_MSC_hull(MSC_image_h, MSC_image_d);
////fgets(user_response, sizeof(user_response), stdin);
//dim3 dimBlock( SLICES );
//dim3 dimGrid( COLUMNS, ROWS );
//test_func_GPU<<< dimGrid, dimBlock >>>( MSC_image_d );
//cudaMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost);
////write_array_to_disk( "test", output_directory, output_folder, image_h, COLUMNS, ROWS, SLICES, VOXELS, true );
//for( int i = 0; i < 20; i++ )
// cout << MSC_image_h[i] << endl;
cout << CLOCKS_PER_SEC << endl;
/*int num_elements = 5;
int init = 0;
int* series1 = (int*) calloc( num_elements, sizeof(int) );
int* series2 = (int*) calloc( num_elements, sizeof(int) );
for( int i = 0; i < num_elements; i++ )
{
series1[i] = i;
series2[i] = i;
}
int series[5] = {1, 2, 3, 4, 5 };
vector<int> vec (series, series + sizeof(series) / sizeof(int) );
int result = inner_product(series1, series1 + num_elements, series2, init );
cout << result << endl;*/
}
__global__ void test_func( int* image )
{
int voxel_x = blockIdx.x;
int voxel_y = blockIdx.y;
int voxel_z = threadIdx.x;
int voxel = voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS;
int x = 0, y = 0, z = 0;
test_func_device( x, y, z );
image[voxel] = x * y * z;
}
__device__ void test_func_device( int& x, int& y, int& z )
{
x = 2;
y = 3;
z = 4;
} |
59e8aa1e222b43fb4762dc4de45c156fd31fd29d.hip | // !!! This is a file automatically generated by hipify!!!
#include "neural_net.h"
void NeuralNet::getComputationTime(void *X, int *y, double learning_rate,
std::vector<float> &fwd_computation_time,
std::vector<float> &bwd_computation_time) {
for (int i = 0; i < num_layers; i++) prefetched[i] = false;
// checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] *
// data_type_size, NULL));
// checkCudaErrors(hipMemcpy(layer_input[0], X, batch_size * input_channels *
// input_h * input_w * data_type_size, hipMemcpyHostToDevice));
// checkCudaErrors(hipMemcpy(this->y, y, batch_size * data_type_size,
// hipMemcpyHostToDevice));
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
// forward propagate
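  // Per layer: allocate the layer's input/output buffers (and any conv workspace) through cnmem,
  // bracket the forward computation with start_compute/stop_compute events on stream_compute, push
  // the elapsed time onto fwd_computation_time, then free the buffers. A trailing SOFTMAX layer is
  // folded into the preceding layer's measurement.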
for (int i = 0; i < num_layers; i++) {
size_t cur_workspace_size;
void *cur_workspace;
checkCNMEM(cnmemMalloc(&layer_input[i],
layer_input_size[i] * data_type_size, NULL));
checkCNMEM(cnmemMalloc(&layer_input[i + 1],
layer_input_size[i + 1] * data_type_size, NULL));
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
}
checkCudaErrors(hipEventRecord(start_compute, stream_compute));
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
// computation
checkCUDNN(cudnnConvolutionForward(
cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i],
cur_params->filter_desc, cur_params->W, cur_params->conv_desc,
cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->output_tensor, layer_input[i + 1]));
checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc,
cur_params->b, &alpha,
cur_params->output_tensor, layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, layer_input[i + 1]));
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)params[i];
// std::cout << "FChere" << i << std::endl;
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(hipblasSgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W,
cur_params->C_out, (float *)layer_input[i], cur_params->C_in,
&Sbeta, (float *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasSgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out,
(float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1],
cur_params->C_out));
} else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W,
cur_params->C_out, (double *)layer_input[i], cur_params->C_in,
&Dbeta, (double *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasDgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out,
(double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1],
cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, layer_input[i + 1]));
}
} else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutForward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
layer_input[i], cur_params->input_tensor, layer_input[i + 1],
cur_params->reserved_space, cur_params->reserved_space_size));
} else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
checkCUDNN(cudnnBatchNormalizationForwardTraining(
cudnn_handle, cur_params->mode, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale,
cur_params->bias, cur_params->factor, cur_params->running_mean,
cur_params->running_variance, cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
} else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(
cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->output_tensor, layer_input[i + 1]));
} else if (layer_type[i] == ACTV) {
std::cout << "Panic!! ACTV wrong place\n";
exit(0);
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor,
layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1]));
} else if (layer_type[i] == SOFTMAX) {
// std::cout << "Softmax\n";
std::cout << "Panic!! SOFTMAX wrong place\n";
exit(0);
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, layer_input[i + 1]));
}
// ---------------------- vDNN start ----------------------
// synchronization
// checkCudaErrors(hipDeviceSynchronize());
// if next layer is ACTV or SOFTMAX, complete that and come to
// synchronization
// the case in above if for ACTV and SOFTMAX never occurs
if (layer_type[i + 1] == SOFTMAX) {
i++;
layer_input[i + 1] = layer_input[i];
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, layer_input[i + 1]));
i--;
}
// sync with stream_compute guaranteed
checkCudaErrors(hipEventRecord(stop_compute, stream_compute));
checkCudaErrors(hipEventSynchronize(stop_compute));
float compute_time = 0;
checkCudaErrors(
hipEventElapsedTime(&compute_time, start_compute, stop_compute));
fwd_computation_time.push_back(compute_time);
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
}
checkCNMEM(cnmemFree(layer_input[i], NULL));
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
// ---------------------- vDNN end ------------------------
}
// time for loss compute ignored
// *scalar_loss = computeLoss();
// time for softmax backward ignored
// ---------------------- vDNN start ----------------------
// checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes
// * data_type_size, NULL));
// space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] *
// data_type_size);
// // std::cout << "Free bytes: " << free_bytes << std::endl;
// // ---------------------- vDNN end ------------------------
// if (layer_type[num_layers - 1] == SOFTMAX) {
// // SoftmaxLayerParams *cur_params = (SoftmaxLayerParams
// *)params[num_layers - 1];
// if (data_type == CUDNN_DATA_FLOAT) {
// checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size *
// num_classes * sizeof(float)));
// softmaxLossBackProp<float><<<ceil(1.0 * batch_size / BW),
// BW>>>(this->y, (float *)layer_input[num_layers],
// (float
// *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
// }
// else if (data_type == CUDNN_DATA_DOUBLE) {
// checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size *
// num_classes * sizeof(double)));
// softmaxLossBackProp<double><<<ceil(1.0 * batch_size / BW),
// BW>>>(this->y, (double *)layer_input[num_layers],
// (double
// *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
// }
// }
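  // Backward timing mirrors the forward pass: per layer, allocate activations, gradients and conv
  // workspaces, time the backward computation plus the parameter update with events on
  // stream_compute, prepend the result to bwd_computation_time, and free everything afterwards.
  // ACTV and SOFTMAX iterations are not timed separately.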
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size,
cur_workspace_size;
void *cur_workspace;
checkCNMEM(cnmemMalloc(&layer_input[i + 1],
layer_input_size[i + 1] * data_type_size, NULL));
checkCNMEM(cnmemMalloc(&layer_input[i],
layer_input_size[i] * data_type_size, NULL));
    checkCNMEM(cnmemMalloc(&dlayer_input[i + 1],
                           layer_input_size[i + 1] * data_type_size, NULL));
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
} else {
checkCNMEM(cnmemMalloc(&dlayer_input[i],
layer_input_size[i] * data_type_size, NULL));
}
}
// ---------------------- vDNN end ------------------------
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size <<
// std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
}
}
if (!(i + 1 < num_layers && layer_type[i + 1] == SOFTMAX))
checkCudaErrors(hipEventRecord(start_compute, stream_compute));
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size <<
// std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
checkCUDNN(cudnnConvolutionBackwardBias(
cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1],
&beta, cur_params->bias_desc, cur_params->db));
// std::cout << "neural_net: backward conv i:" << i << std::endl;
checkCUDNN(cudnnConvolutionBackwardFilter(
cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i],
cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc,
cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->filter_desc, cur_params->dW));
if (i > 0)
checkCUDNN(cudnnConvolutionBackwardData(
cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W,
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace,
cur_workspace_size, &beta, cur_params->input_tensor,
dlayer_input[i]));
// std::cout << "Free bytes: " << free_bytes << std::endl;
// std::cout << "here\n";
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
if (data_type == CUDNN_DATA_FLOAT) {
// bias backward
checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
cur_params->C_out, 1, batch_size, &Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)one_vec, batch_size, &Sbeta,
(float *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(hipblasSgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, cur_params->C_out,
cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1],
cur_params->C_out, (float *)layer_input[i], cur_params->C_in,
&Sbeta, (float *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(
hipblasSgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Salpha, (float *)cur_params->W, cur_params->C_out,
(float *)dlayer_input[i + 1], cur_params->C_out,
&Sbeta, (float *)dlayer_input[i], cur_params->C_in));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
// bias backward
checkCUBLAS(hipblasDgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, 1,
batch_size, &Dalpha, (double *)dlayer_input[i + 1],
cur_params->C_out, (double *)one_vec, batch_size, &Dbeta,
(double *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Dalpha, (double *)dlayer_input[i + 1],
cur_params->C_out, (double *)layer_input[i],
cur_params->C_in, &Dbeta,
(double *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(
hipblasDgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Dalpha, (double *)cur_params->W, cur_params->C_out,
(double *)dlayer_input[i + 1], cur_params->C_out,
&Dbeta, (double *)dlayer_input[i], cur_params->C_in));
}
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutBackward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i],
cur_params->reserved_space, cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
checkCUDNN(cudnnBatchNormalizationBackward(
cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i],
cur_params->sbmv_desc, cur_params->scale, cur_params->dscale,
cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean,
cur_params->result_save_inv_var));
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(
cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, dlayer_input[i]));
}
else if (layer_type[i] == ACTV) {
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor,
layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, dlayer_input[i]));
continue;
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxBackward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1], &beta,
cur_params->input_tensor, dlayer_input[i]));
// std::cout << "compute here\n";
continue;
}
// ---------------------- vDNN start ----------------------
// checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventRecord(stop_compute, stream_compute));
checkCudaErrors(hipEventSynchronize(stop_compute));
float compute_time;
checkCudaErrors(
hipEventElapsedTime(&compute_time, start_compute, stop_compute));
bwd_computation_time.insert(bwd_computation_time.begin(), compute_time);
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
}
} else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
}
} else if (layer_type[i] == BATCHNORM) {
if (!pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
}
}
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL));
checkCNMEM(cnmemFree(layer_input[i], NULL));
if (i > 0 && layer_type[i] != SOFTMAX)
checkCNMEM(cnmemFree(dlayer_input[i], NULL));
}
}
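// Measures per-layer transfer times: for every non-SOFTMAX layer, a device buffer and a pinned host
// buffer of the layer-input size are allocated, and the device-to-host and host-to-device copies are
// timed with events on stream_memory, filling fwd_transfer_time and bwd_transfer_time respectively.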
void NeuralNet::getTransferTime(void *X, int *y, double learning_rate,
std::vector<float> &fwd_transfer_time,
std::vector<float> &bwd_transfer_time) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == SOFTMAX) continue;
void *device_data;
void *host_data;
checkCNMEM(
cnmemMalloc(&device_data, layer_input_size[i] * data_type_size, NULL));
checkCudaErrors(
hipHostMalloc(&host_data, layer_input_size[i] * data_type_size));
checkCudaErrors(hipEventRecord(start_transfer, stream_memory));
checkCudaErrors(hipMemcpyAsync(host_data, device_data,
layer_input_size[i] * data_type_size,
hipMemcpyDeviceToHost, stream_memory));
checkCudaErrors(hipEventRecord(stop_transfer, stream_memory));
checkCudaErrors(hipEventSynchronize(stop_transfer));
float transfer_time;
checkCudaErrors(
hipEventElapsedTime(&transfer_time, start_transfer, stop_transfer));
fwd_transfer_time.push_back(transfer_time);
checkCudaErrors(hipEventRecord(start_transfer, stream_memory));
checkCudaErrors(hipMemcpyAsync(device_data, host_data,
layer_input_size[i] * data_type_size,
hipMemcpyHostToDevice, stream_memory));
checkCudaErrors(hipEventRecord(stop_transfer, stream_memory));
checkCudaErrors(hipEventSynchronize(stop_transfer));
checkCudaErrors(
hipEventElapsedTime(&transfer_time, start_transfer, stop_transfer));
bwd_transfer_time.push_back(transfer_time);
}
} | 59e8aa1e222b43fb4762dc4de45c156fd31fd29d.cu | #include "neural_net.h"
void NeuralNet::getComputationTime(void *X, int *y, double learning_rate,
std::vector<float> &fwd_computation_time,
std::vector<float> &bwd_computation_time) {
for (int i = 0; i < num_layers; i++) prefetched[i] = false;
// checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] *
// data_type_size, NULL));
// checkCudaErrors(cudaMemcpy(layer_input[0], X, batch_size * input_channels *
// input_h * input_w * data_type_size, cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(this->y, y, batch_size * data_type_size,
// cudaMemcpyHostToDevice));
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
// forward propagate
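  // Per layer: allocate the layer's input/output buffers (and any conv workspace) through cnmem,
  // bracket the forward computation with start_compute/stop_compute events on stream_compute, push
  // the elapsed time onto fwd_computation_time, then free the buffers. A trailing SOFTMAX layer is
  // folded into the preceding layer's measurement.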
for (int i = 0; i < num_layers; i++) {
size_t cur_workspace_size;
void *cur_workspace;
checkCNMEM(cnmemMalloc(&layer_input[i],
layer_input_size[i] * data_type_size, NULL));
checkCNMEM(cnmemMalloc(&layer_input[i + 1],
layer_input_size[i + 1] * data_type_size, NULL));
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
}
checkCudaErrors(cudaEventRecord(start_compute, stream_compute));
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
// computation
checkCUDNN(cudnnConvolutionForward(
cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i],
cur_params->filter_desc, cur_params->W, cur_params->conv_desc,
cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->output_tensor, layer_input[i + 1]));
checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc,
cur_params->b, &alpha,
cur_params->output_tensor, layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, layer_input[i + 1]));
}
}
else if (layer_type[i] == FULLY_CONNECTED) {
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)params[i];
// std::cout << "FChere" << i << std::endl;
if (data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(cublasSgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W,
cur_params->C_out, (float *)layer_input[i], cur_params->C_in,
&Sbeta, (float *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasSgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out,
(float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1],
cur_params->C_out));
} else if (data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W,
cur_params->C_out, (double *)layer_input[i], cur_params->C_in,
&Dbeta, (double *)layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasDgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out,
(double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1],
cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, layer_input[i + 1]));
}
} else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutForward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
layer_input[i], cur_params->input_tensor, layer_input[i + 1],
cur_params->reserved_space, cur_params->reserved_space_size));
} else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
checkCUDNN(cudnnBatchNormalizationForwardTraining(
cudnn_handle, cur_params->mode, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale,
cur_params->bias, cur_params->factor, cur_params->running_mean,
cur_params->running_variance, cur_params->epsilon,
cur_params->result_save_mean, cur_params->result_save_inv_var));
} else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(
cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->output_tensor, layer_input[i + 1]));
} else if (layer_type[i] == ACTV) {
std::cout << "Panic!! ACTV wrong place\n";
exit(0);
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor,
layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1]));
} else if (layer_type[i] == SOFTMAX) {
// std::cout << "Softmax\n";
std::cout << "Panic!! SOFTMAX wrong place\n";
exit(0);
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, layer_input[i + 1]));
}
// ---------------------- vDNN start ----------------------
// synchronization
// checkCudaErrors(cudaDeviceSynchronize());
// if next layer is ACTV or SOFTMAX, complete that and come to
// synchronization
// the case in above if for ACTV and SOFTMAX never occurs
if (layer_type[i + 1] == SOFTMAX) {
i++;
layer_input[i + 1] = layer_input[i];
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxForward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, layer_input[i + 1]));
i--;
}
// sync with stream_compute guaranteed
checkCudaErrors(cudaEventRecord(stop_compute, stream_compute));
checkCudaErrors(cudaEventSynchronize(stop_compute));
float compute_time = 0;
checkCudaErrors(
cudaEventElapsedTime(&compute_time, start_compute, stop_compute));
fwd_computation_time.push_back(compute_time);
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
}
checkCNMEM(cnmemFree(layer_input[i], NULL));
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) {
i = i + 1;
}
// ---------------------- vDNN end ------------------------
}
// time for loss compute ignored
// *scalar_loss = computeLoss();
// time for softmax backward ignored
// ---------------------- vDNN start ----------------------
// checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes
// * data_type_size, NULL));
// space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] *
// data_type_size);
// // std::cout << "Free bytes: " << free_bytes << std::endl;
// // ---------------------- vDNN end ------------------------
// if (layer_type[num_layers - 1] == SOFTMAX) {
// // SoftmaxLayerParams *cur_params = (SoftmaxLayerParams
// *)params[num_layers - 1];
// if (data_type == CUDNN_DATA_FLOAT) {
// checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size *
// num_classes * sizeof(float)));
// softmaxLossBackProp<float><<<ceil(1.0 * batch_size / BW),
// BW>>>(this->y, (float *)layer_input[num_layers],
// (float
// *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
// }
// else if (data_type == CUDNN_DATA_DOUBLE) {
// checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size *
// num_classes * sizeof(double)));
// softmaxLossBackProp<double><<<ceil(1.0 * batch_size / BW),
// BW>>>(this->y, (double *)layer_input[num_layers],
// (double
// *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps);
// }
// }
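  // Backward timing mirrors the forward pass: per layer, allocate activations, gradients and conv
  // workspaces, time the backward computation plus the parameter update with events on
  // stream_compute, prepend the result to bwd_computation_time, and free everything afterwards.
  // ACTV and SOFTMAX iterations are not timed separately.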
for (int i = num_layers - 1; i >= 0; i--) {
// ---------------------- vDNN start ----------------------
size_t cur_filter_workspace_size, cur_data_workspace_size,
cur_workspace_size;
void *cur_workspace;
checkCNMEM(cnmemMalloc(&layer_input[i + 1],
layer_input_size[i + 1] * data_type_size, NULL));
checkCNMEM(cnmemMalloc(&layer_input[i],
layer_input_size[i] * data_type_size, NULL));
    checkCNMEM(cnmemMalloc(&dlayer_input[i + 1],
                           layer_input_size[i + 1] * data_type_size, NULL));
if (i > 0) {
if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) {
dlayer_input[i] = dlayer_input[i + 1];
} else {
checkCNMEM(cnmemMalloc(&dlayer_input[i],
layer_input_size[i] * data_type_size, NULL));
}
}
// ---------------------- vDNN end ------------------------
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
// allocate space for derivative
if (!pre_alloc_conv_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size <<
// std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL));
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (!pre_alloc_fc_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
}
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
if (!pre_alloc_batch_norm_derivative) {
cur_params->cnmemAllocDerivatives(data_type_size, NULL);
}
}
if (!(i + 1 < num_layers && layer_type[i + 1] == SOFTMAX))
checkCudaErrors(cudaEventRecord(start_compute, stream_compute));
if (layer_type[i] == CONV) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
cur_filter_workspace_size = cur_params->bwd_filter_workspace_size;
if (i > 0)
cur_data_workspace_size = cur_params->bwd_data_workspace_size;
else
cur_data_workspace_size = 0;
// std::cout << "bwd cur_workspace_size: " << cur_workspace_size <<
// std::endl;
cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size)
? cur_filter_workspace_size
: cur_data_workspace_size;
checkCUDNN(cudnnConvolutionBackwardBias(
cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1],
&beta, cur_params->bias_desc, cur_params->db));
// std::cout << "neural_net: backward conv i:" << i << std::endl;
checkCUDNN(cudnnConvolutionBackwardFilter(
cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i],
cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc,
cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->filter_desc, cur_params->dW));
if (i > 0)
checkCUDNN(cudnnConvolutionBackwardData(
cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W,
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace,
cur_workspace_size, &beta, cur_params->input_tensor,
dlayer_input[i]));
// std::cout << "Free bytes: " << free_bytes << std::endl;
// std::cout << "here\n";
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == FULLY_CONNECTED) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
if (cur_params->activation_mode != ACTIVATION_NONE) {
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->output_tensor, layer_input[i + 1], &beta,
cur_params->output_tensor, dlayer_input[i + 1]));
}
if (data_type == CUDNN_DATA_FLOAT) {
// bias backward
checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
cur_params->C_out, 1, batch_size, &Salpha,
(float *)dlayer_input[i + 1], cur_params->C_out,
(float *)one_vec, batch_size, &Sbeta,
(float *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(cublasSgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, cur_params->C_out,
cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1],
cur_params->C_out, (float *)layer_input[i], cur_params->C_in,
&Sbeta, (float *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(
cublasSgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Salpha, (float *)cur_params->W, cur_params->C_out,
(float *)dlayer_input[i + 1], cur_params->C_out,
&Sbeta, (float *)dlayer_input[i], cur_params->C_in));
}
else if (data_type == CUDNN_DATA_DOUBLE) {
// bias backward
checkCUBLAS(cublasDgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, 1,
batch_size, &Dalpha, (double *)dlayer_input[i + 1],
cur_params->C_out, (double *)one_vec, batch_size, &Dbeta,
(double *)cur_params->db, cur_params->C_out));
// weight backward
checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T,
cur_params->C_out, cur_params->C_in, batch_size,
&Dalpha, (double *)dlayer_input[i + 1],
cur_params->C_out, (double *)layer_input[i],
cur_params->C_in, &Dbeta,
(double *)cur_params->dW, cur_params->C_out));
// data backward
if (i > 0)
checkCUBLAS(
cublasDgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N,
cur_params->C_in, batch_size, cur_params->C_out,
&Dalpha, (double *)cur_params->W, cur_params->C_out,
(double *)dlayer_input[i + 1], cur_params->C_out,
&Dbeta, (double *)dlayer_input[i], cur_params->C_in));
}
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == DROPOUT) {
DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i];
checkCUDNN(cudnnDropoutBackward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i],
cur_params->reserved_space, cur_params->reserved_space_size));
}
else if (layer_type[i] == BATCHNORM) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
checkCUDNN(cudnnBatchNormalizationBackward(
cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta,
cur_params->input_tensor, layer_input[i], cur_params->input_tensor,
dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i],
cur_params->sbmv_desc, cur_params->scale, cur_params->dscale,
cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean,
cur_params->result_save_inv_var));
cur_params->stepParams(cublas_handle, learning_rate);
}
else if (layer_type[i] == POOLING) {
PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i];
checkCUDNN(
cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->output_tensor, layer_input[i + 1],
cur_params->output_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, dlayer_input[i]));
}
else if (layer_type[i] == ACTV) {
ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i];
checkCUDNN(cudnnActivationBackward(
cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor,
layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1],
cur_params->input_tensor, layer_input[i], &beta,
cur_params->input_tensor, dlayer_input[i]));
continue;
}
else if (layer_type[i] == SOFTMAX) {
// std::cout << "compute here\n";
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i];
checkCUDNN(cudnnSoftmaxBackward(
cudnn_handle, cur_params->algo, cur_params->mode, &alpha,
cur_params->input_tensor, layer_input[i + 1],
cur_params->input_tensor, dlayer_input[i + 1], &beta,
cur_params->input_tensor, dlayer_input[i]));
// std::cout << "compute here\n";
continue;
}
// ---------------------- vDNN start ----------------------
// checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventRecord(stop_compute, stream_compute));
checkCudaErrors(cudaEventSynchronize(stop_compute));
float compute_time;
checkCudaErrors(
cudaEventElapsedTime(&compute_time, start_compute, stop_compute));
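    // Prepend so the recorded times end up in forward layer order, since this
    // backward loop walks the layers last-to-first.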
bwd_computation_time.insert(bwd_computation_time.begin(), compute_time);
if (layer_type[i] == CONV) {
checkCNMEM(cnmemFree(cur_workspace, NULL));
if (!pre_alloc_conv_derivative) {
ConvLayerParams *cur_params = (ConvLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
}
} else if (layer_type[i] == FULLY_CONNECTED) {
if (!pre_alloc_fc_derivative) {
FCLayerParams *cur_params = (FCLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
}
} else if (layer_type[i] == BATCHNORM) {
if (!pre_alloc_batch_norm_derivative) {
BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
cur_params->cnmemFreeDerivatives(NULL);
}
}
checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL));
checkCNMEM(cnmemFree(layer_input[i], NULL));
if (i > 0 && layer_type[i] != SOFTMAX)
checkCNMEM(cnmemFree(dlayer_input[i], NULL));
}
}
void NeuralNet::getTransferTime(void *X, int *y, double learning_rate,
std::vector<float> &fwd_transfer_time,
std::vector<float> &bwd_transfer_time) {
for (int i = 0; i < num_layers; i++) {
if (layer_type[i] == SOFTMAX) continue;
void *device_data;
void *host_data;
checkCNMEM(
cnmemMalloc(&device_data, layer_input_size[i] * data_type_size, NULL));
checkCudaErrors(
cudaMallocHost(&host_data, layer_input_size[i] * data_type_size));
checkCudaErrors(cudaEventRecord(start_transfer, stream_memory));
checkCudaErrors(cudaMemcpyAsync(host_data, device_data,
layer_input_size[i] * data_type_size,
cudaMemcpyDeviceToHost, stream_memory));
checkCudaErrors(cudaEventRecord(stop_transfer, stream_memory));
checkCudaErrors(cudaEventSynchronize(stop_transfer));
float transfer_time;
checkCudaErrors(
cudaEventElapsedTime(&transfer_time, start_transfer, stop_transfer));
fwd_transfer_time.push_back(transfer_time);
checkCudaErrors(cudaEventRecord(start_transfer, stream_memory));
checkCudaErrors(cudaMemcpyAsync(device_data, host_data,
layer_input_size[i] * data_type_size,
cudaMemcpyHostToDevice, stream_memory));
checkCudaErrors(cudaEventRecord(stop_transfer, stream_memory));
checkCudaErrors(cudaEventSynchronize(stop_transfer));
checkCudaErrors(
cudaEventElapsedTime(&transfer_time, start_transfer, stop_transfer));
bwd_transfer_time.push_back(transfer_time);
}
} |
2a40ea21bf11be6492d2b2f6acdedbc29607a97d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaDefs.h>
#include <limits>
#include <benchmark.h>
#define __PRINT__ cout << __PRETTY_FUNCTION__ << endl
constexpr unsigned int TPB = 512;
constexpr unsigned int NO_BLOCKS = 46;
constexpr unsigned int N = 1 << 20;
constexpr int numberOfPasses = 1;
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
int *a, *b;
int *da, *db, *dGlobalMax;
__host__ void fillData(int *data, const int length) {
for (int i=0; i<length; i++) {
data[i]= i;
}
data[static_cast<int>(length * 0.5)] = length;
}
__host__ void fillData(int *data, const int length, const unsigned int value) {
for (int i = 0; i<length; i++) {
data[i] = value; // use the provided fill value (it was previously ignored)
}
}
__host__ void prepareData() {
// page-locked (pinned) host allocation
constexpr unsigned int aSize = N * sizeof(int);
constexpr unsigned int bSize = NO_BLOCKS * sizeof(int);
hipHostMalloc((void**)&a, aSize, hipHostMallocDefault);
hipHostMalloc((void**)&b, bSize, hipHostMallocDefault);
fillData(a, N);
fillData(b, NO_BLOCKS, INT_MIN);
hipMalloc((void**)&da, aSize);
hipMalloc((void**)&db, bSize);
hipMalloc((void**)&dGlobalMax, sizeof(int));
hipMemcpy(da, a, aSize, hipMemcpyHostToDevice);
hipMemcpy(db, b, bSize, hipMemcpyHostToDevice);
}
__host__ void releaseData() {
hipFree(da);
hipFree(db);
hipFree(dGlobalMax);
hipHostFree(a);
hipHostFree(b);
}
template<bool MAKE_IF>
__global__ void kernel0(const int* __restrict__ data, const unsigned int dataLength, int* __restrict__ globalMax) {
// TODO: get max using atomic instruction
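	// Grid-stride loop: each thread walks the array with a stride of
	// gridDim.x * blockDim.x. With MAKE_IF, a plain (racy but safe) read of
	// *globalMax skips the atomic when the candidate cannot raise the maximum,
	// trading an extra load for fewer atomic collisions.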
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int jump = gridDim.x * blockDim.x;
const int* threadData = (int*)data + idx;
while (idx < dataLength) {
if constexpr(MAKE_IF) {
if (*globalMax < *threadData) {
atomicMax(globalMax, data[idx]);
}
} else {
atomicMax(globalMax, data[idx]);
}
threadData += jump;
idx += jump;
}
}
template<bool MAKE_IF>
__global__ void kernel1(const int* __restrict__ data, const unsigned int dataLength, int* __restrict__ globalMax) {
// TODO: get max using atomic instruction
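	// Two-level reduction: threads first combine into a shared-memory blockMax,
	// and only thread 0 issues a single global atomicMax per block.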
__shared__ int blockMax;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) blockMax = INT32_MIN;
__syncthreads();
const unsigned int jump = gridDim.x * blockDim.x;
const int* threadData = data + idx;
while (idx < dataLength) {
if constexpr(MAKE_IF) {
if (blockMax < *threadData) {
atomicMax(&blockMax, data[idx]);
}
} else {
atomicMax(&blockMax, data[idx]);
}
threadData += jump;
idx += jump;
}
if (threadIdx.x == 0) atomicMax(globalMax, blockMax);
}
template<bool MAKE_IF>
__host__ void testKernel0() {
dim3 blockSize(TPB, 1, 1);
dim3 gridSize(getNumberOfParts(N, TPB), 1, 1);
int globalMax = INT_MIN;
auto test = [&]() {
hipMemcpy(dGlobalMax, &globalMax, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel0<MAKE_IF>), dim3(gridSize), dim3(blockSize), 0, 0, da, N, dGlobalMax);
};
float gpuTime = GPUTIME(numberOfPasses, test());
hipDeviceSynchronize();
printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__ , gpuTime);
hipMemcpy(&globalMax, dGlobalMax, sizeof(int), hipMemcpyDeviceToHost);
printf("\nMaximum: %d\n", globalMax);
}
template<bool MAKE_IF>
__host__ void testKernel1() {
dim3 blockSize(TPB, 1, 1);
dim3 gridSize(getNumberOfParts(N, TPB), 1, 1);
int globalMax = INT_MIN;
auto test = [&]() {
hipMemcpy(dGlobalMax, &globalMax, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel1<MAKE_IF>), dim3(gridSize), dim3(blockSize), 0, 0, da, N, dGlobalMax);
};
float gpuTime = GPUTIME(numberOfPasses, test());
hipDeviceSynchronize();
printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__ , gpuTime);
hipMemcpy(&globalMax, dGlobalMax, sizeof(int), hipMemcpyDeviceToHost);
printf("\nMaximum: %d\n", globalMax);
}
int main(int argc, char *argv[]) {
initializeCUDA(deviceProp);
prepareData();
// TODO: CALL kernel 0
testKernel0<true>();
testKernel0<false>();
testKernel1<true>();
testKernel1<false>();
releaseData();
return 0;
}
| 2a40ea21bf11be6492d2b2f6acdedbc29607a97d.cu | #include <cudaDefs.h>
#include <limits>
#include <benchmark.h>
#define __PRINT__ cout << __PRETTY_FUNCTION__ << endl
constexpr unsigned int TPB = 512;
constexpr unsigned int NO_BLOCKS = 46;
constexpr unsigned int N = 1 << 20;
constexpr int numberOfPasses = 1;
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
int *a, *b;
int *da, *db, *dGlobalMax;
__host__ void fillData(int *data, const int length) {
for (int i=0; i<length; i++) {
data[i]= i;
}
data[static_cast<int>(length * 0.5)] = length;
}
__host__ void fillData(int *data, const int length, const unsigned int value) {
for (int i = 0; i<length; i++) {
data[i] = value; // use the provided fill value (it was previously ignored)
}
}
__host__ void prepareData() {
// page-locked (pinned) host allocation
constexpr unsigned int aSize = N * sizeof(int);
constexpr unsigned int bSize = NO_BLOCKS * sizeof(int);
cudaHostAlloc((void**)&a, aSize, cudaHostAllocDefault);
cudaHostAlloc((void**)&b, bSize, cudaHostAllocDefault);
fillData(a, N);
fillData(b, NO_BLOCKS, INT_MIN);
cudaMalloc((void**)&da, aSize);
cudaMalloc((void**)&db, bSize);
cudaMalloc((void**)&dGlobalMax, sizeof(int));
cudaMemcpy(da, a, aSize, cudaMemcpyHostToDevice);
cudaMemcpy(db, b, bSize, cudaMemcpyHostToDevice);
}
__host__ void releaseData() {
cudaFree(da);
cudaFree(db);
cudaFree(dGlobalMax);
cudaFreeHost(a);
cudaFreeHost(b);
}
template<bool MAKE_IF>
__global__ void kernel0(const int* __restrict__ data, const unsigned int dataLength, int* __restrict__ globalMax) {
// TODO: get max using atomic instruction
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int jump = gridDim.x * blockDim.x;
const int* threadData = (int*)data + idx;
while (idx < dataLength) {
if constexpr(MAKE_IF) {
if (*globalMax < *threadData) {
atomicMax(globalMax, data[idx]);
}
} else {
atomicMax(globalMax, data[idx]);
}
threadData += jump;
idx += jump;
}
}
template<bool MAKE_IF>
__global__ void kernel1(const int* __restrict__ data, const unsigned int dataLength, int* __restrict__ globalMax) {
// TODO: get max using atomic instruction
__shared__ int blockMax;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) blockMax = INT32_MIN;
__syncthreads();
const unsigned int jump = gridDim.x * blockDim.x;
const int* threadData = data + idx;
while (idx < dataLength) {
if constexpr(MAKE_IF) {
if (blockMax < *threadData) {
atomicMax(&blockMax, data[idx]);
}
} else {
atomicMax(&blockMax, data[idx]);
}
threadData += jump;
idx += jump;
}
if (threadIdx.x == 0) atomicMax(globalMax, blockMax);
}
template<bool MAKE_IF>
__host__ void testKernel0() {
dim3 blockSize(TPB, 1, 1);
dim3 gridSize(getNumberOfParts(N, TPB), 1, 1);
int globalMax = INT_MIN;
auto test = [&]() {
cudaMemcpy(dGlobalMax, &globalMax, sizeof(int), cudaMemcpyHostToDevice);
kernel0<MAKE_IF><<<gridSize, blockSize>>> (da, N, dGlobalMax);
};
float gpuTime = GPUTIME(numberOfPasses, test());
cudaDeviceSynchronize();
printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__ , gpuTime);
cudaMemcpy(&globalMax, dGlobalMax, sizeof(int), cudaMemcpyDeviceToHost);
printf("\nMaximum: %d\n", globalMax);
}
template<bool MAKE_IF>
__host__ void testKernel1() {
dim3 blockSize(TPB, 1, 1);
dim3 gridSize(getNumberOfParts(N, TPB), 1, 1);
int globalMax = INT_MIN;
auto test = [&]() {
cudaMemcpy(dGlobalMax, &globalMax, sizeof(int), cudaMemcpyHostToDevice);
kernel1<MAKE_IF><<<gridSize, blockSize>>> (da, N, dGlobalMax);
};
float gpuTime = GPUTIME(numberOfPasses, test());
cudaDeviceSynchronize();
printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__ , gpuTime);
cudaMemcpy(&globalMax, dGlobalMax, sizeof(int), cudaMemcpyDeviceToHost);
printf("\nMaximum: %d\n", globalMax);
}
int main(int argc, char *argv[]) {
initializeCUDA(deviceProp);
prepareData();
// TODO: CALL kernel 0
testKernel0<true>();
testKernel0<false>();
testKernel1<true>();
testKernel1<false>();
releaseData();
return 0;
}
|
e9706cbd49eec8e23d5377c154d3448325947063.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "exclusive_scan.h"
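// Recursive multi-block exclusive scan:
//   1. scan each BLOCK_SIZE chunk, writing a per-chunk carry into `aux`,
//   2. recursively scan `aux`,
//   3. auxMerge adds each scanned carry to every element of its chunk.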
void recursiveScan(float *input, float *output, int numInputs) {
const int scanGridSize = (numInputs + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (scanGridSize == 1) {
hipLaunchKernelGGL(( exclusiveScan), dim3(scanGridSize), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), 0, input, output, nullptr, numInputs);
wbCheck(hipDeviceSynchronize());
return;
} else {
float *aux;
float *scannedAux;
wbCheck(hipMalloc((void **) &aux, scanGridSize * sizeof(float)));
wbCheck(hipMalloc((void **) &scannedAux, scanGridSize * sizeof(float)));
hipLaunchKernelGGL(( exclusiveScan), dim3(scanGridSize), dim3(BLOCK_SIZE), BLOCK_SIZE * sizeof(float), 0, input, output, aux, numInputs);
wbCheck(hipDeviceSynchronize());
recursiveScan(aux, scannedAux, scanGridSize);
int mergeGrids = (scanGridSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipLaunchKernelGGL(( auxMerge), dim3(mergeGrids), dim3(BLOCK_SIZE), 0, 0, scannedAux, output, numInputs);
wbCheck(hipDeviceSynchronize());
hipFree(scannedAux);
hipFree(aux);
}
}
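// The kernel builds an exclusive scan by loading each element's left
// neighbour (input[i-1], or 0 for i == 0) and then running a Brent-Kung
// style in-place scan on that shifted array.
// Example: [3, 1, 7, 0] -> shifted [0, 3, 1, 7] -> scanned [0, 3, 4, 11].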
__global__ void exclusiveScan(const float *input, float *output, float *S, int N) {
extern __shared__ float sharedInput[];
unsigned int tx = threadIdx.x;
int i = tx + blockIdx.x * blockDim.x;
if (i < N && i != 0) {
sharedInput[tx] = input[i - 1];
} else {
sharedInput[tx] = 0;
}
// Down phase
for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1) {
__syncthreads();
int idx = (tx + 1) * 2 * stride - 1;
if (idx < blockDim.x) {
sharedInput[idx] += sharedInput[idx - stride];
}
}
// Up phase
for (int stride = blockDim.x / 4; stride > 0; stride >>= 1) {
__syncthreads();
int idx = (tx + 1) * 2 * stride - 1;
if (idx + stride < blockDim.x) {
sharedInput[idx + stride] += sharedInput[idx];
}
}
__syncthreads();
if (i < N) {
output[i] = sharedInput[tx];
if (S != NULL && tx == (BLOCK_SIZE - 1)) {
S[blockIdx.x] = sharedInput[tx];
}
}
}
__global__ void auxMerge(const float *offsets, float *input, int N) {
const unsigned int tx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int startIdx = tx * BLOCK_SIZE;
for (unsigned int i = 0; i < BLOCK_SIZE; i++) {
unsigned int idx = i + startIdx;
if (idx < N) {
input[idx] += offsets[tx];
}
}
}
| e9706cbd49eec8e23d5377c154d3448325947063.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "exclusive_scan.h"
void recursiveScan(float *input, float *output, int numInputs) {
const int scanGridSize = (numInputs + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (scanGridSize == 1) {
exclusiveScan<<<scanGridSize, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(input, output, nullptr, numInputs);
wbCheck(cudaDeviceSynchronize());
return;
} else {
float *aux;
float *scannedAux;
wbCheck(cudaMalloc((void **) &aux, scanGridSize * sizeof(float)));
wbCheck(cudaMalloc((void **) &scannedAux, scanGridSize * sizeof(float)));
exclusiveScan<<<scanGridSize, BLOCK_SIZE, BLOCK_SIZE * sizeof(float)>>>(input, output, aux, numInputs);
wbCheck(cudaDeviceSynchronize());
recursiveScan(aux, scannedAux, scanGridSize);
int mergeGrids = (scanGridSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
auxMerge<<<mergeGrids, BLOCK_SIZE>>>(scannedAux, output, numInputs);
wbCheck(cudaDeviceSynchronize());
cudaFree(scannedAux);
cudaFree(aux);
}
}
__global__ void exclusiveScan(const float *input, float *output, float *S, int N) {
extern __shared__ float sharedInput[];
unsigned int tx = threadIdx.x;
int i = tx + blockIdx.x * blockDim.x;
if (i < N && i != 0) {
sharedInput[tx] = input[i - 1];
} else {
sharedInput[tx] = 0;
}
// Down phase
for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1) {
__syncthreads();
int idx = (tx + 1) * 2 * stride - 1;
if (idx < blockDim.x) {
sharedInput[idx] += sharedInput[idx - stride];
}
}
// Up phase
for (int stride = blockDim.x / 4; stride > 0; stride >>= 1) {
__syncthreads();
int idx = (tx + 1) * 2 * stride - 1;
if (idx + stride < blockDim.x) {
sharedInput[idx + stride] += sharedInput[idx];
}
}
__syncthreads();
if (i < N) {
output[i] = sharedInput[tx];
if (S != NULL && tx == (BLOCK_SIZE - 1)) {
S[blockIdx.x] = sharedInput[tx];
}
}
}
__global__ void auxMerge(const float *offsets, float *input, int N) {
const unsigned int tx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int startIdx = tx * BLOCK_SIZE;
for (unsigned int i = 0; i < BLOCK_SIZE; i++) {
unsigned int idx = i + startIdx;
if (idx < N) {
input[idx] += offsets[tx];
}
}
}
|
f639e1218c1f89a23c5241a0554992ed874156d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
int are_vectors_equal(int* a, int* b, int n);
/* The old-fashioned CPU-only way to add two vectors */
void add_vectors_host(int *result, int *a, int *b, int n) {
for (int i=0; i<n; i++)
result[i] = a[i] + b[i];
}
/* The kernel that will execute on the GPU */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// If we have more threads than the magnitude of our vector, we need to
// make sure that the excess threads don't try to save results into
// unallocated memory.
if (idx < n)
result[idx] = a[idx] + b[idx];
}
/* This function encapsulates the process of creating and tearing down the
* environment used to execute our vector addition kernel. The steps of the
* process are:
* 1. Allocate memory on the device to hold our vectors
* 2. Copy the vectors to device memory
* 3. Execute the kernel
* 4. Retrieve the result vector from the device by copying it to the host
* 5. Free memory on the device
*/
void add_vectors_dev(int *result, int *a, int *b, int n) {
// Step 1: Allocate memory
int *a_dev, *b_dev, *result_dev;
// Since hipMalloc does not return a pointer like C's traditional malloc
// (it returns a success status instead), we provide as its first argument
// the address of our device pointer variable so that it can change the
// value of our pointer to the correct device address.
hipMalloc((void **) &a_dev, sizeof(int) * n);
hipMalloc((void **) &b_dev, sizeof(int) * n);
hipMalloc((void **) &result_dev, sizeof(int) * n);
// Step 2: Copy the input vectors to the device
hipError_t err = hipMemcpy(a_dev, a, sizeof(int) * n, hipMemcpyHostToDevice);
if (err != hipSuccess)
printf("ERROR!!!!!!!!!!!!!!!!!!!!!!!!!!!");
hipMemcpy(b_dev, b, sizeof(int) * n, hipMemcpyHostToDevice);
// Step 3: Invoke the kernel
// We allocate enough blocks (each 512 threads long) in the grid to
// accommodate all `n` elements in the vectors. The 512 long block size
// is somewhat arbitrary, but with the constraint that we know the
// hardware will support blocks of that size.
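	// For example, with n = 1,000,000 this yields (1,000,000 + 511) / 512 = 1954
	// blocks of 512 threads (1,000,448 threads in total); the surplus threads are
	// masked off by the bounds check inside the kernel.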
dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
dim3 dimBlock(512, 1, 1);
hipLaunchKernelGGL(( add_vectors_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, result_dev, a_dev, b_dev, n);
// Step 4: Retrieve the results
hipMemcpy(result, result_dev, sizeof(int) * n, hipMemcpyDeviceToHost);
// Step 5: Free device memory
hipFree(a_dev);
hipFree(b_dev);
hipFree(result_dev);
}
void print_vector(int *array, int n) {
int i;
for (i=0; i<n; i++)
printf("%d ", array[i]);
printf("\n");
}
int main(void) {
int n = 5; // Length of the arrays
int a[] = {0, 1, 2, 3, 4};
int b[] = {5, 6, 7, 8, 9};
int host_result[5];
int device_result[5];
int l, i;
int* rand_a, *rand_b, *rand_host_result, *rand_device_result;
clock_t start, stop;
double gpu_time, cpu_time;
printf("Please enter vector length: ");
scanf("%d", &l);
rand_a = (int*) malloc(sizeof(int)*l);
rand_b = (int*) malloc(sizeof(int)*l);
rand_host_result = (int*) malloc(sizeof(int)*l);
rand_device_result = (int*) malloc(sizeof(int)*l);
printf("The CPU's answer: ");
add_vectors_host(host_result, a, b, n);
print_vector(host_result, n);
printf("The GPU's answer: ");
add_vectors_dev(device_result, a, b, n);
print_vector(device_result, n);
printf("Generating vectors of length %d... \n", l);
for(i=0; i<l; ++i) {
rand_a[i] = rand() % 10;
rand_b[i] = rand() % 10;
//printf("%d: %d, %d \n", i, rand_a[i], rand_b[i]);
}
start = clock();
add_vectors_host(rand_host_result, rand_a, rand_b, l);
stop = clock();
cpu_time = (double) (stop-start)/CLOCKS_PER_SEC;
start = clock();
add_vectors_dev(rand_device_result, rand_a, rand_b, l);
stop = clock();
gpu_time = (double) (stop-start)/CLOCKS_PER_SEC;
//print_vector(rand_host_result, l);
printf("CPU compute time: %f", cpu_time);
printf("\n");
printf("GPU compute time: %f", gpu_time);
printf("\n");
printf("Ratio: %f", cpu_time / gpu_time);
printf("\n");
if(!are_vectors_equal(rand_host_result, rand_device_result, l)) {
printf("WARNING! Host and device results do not agree\n");
}
free(rand_a);
free(rand_b);
free(rand_host_result);
free(rand_device_result);
return 0;
}
int are_vectors_equal(int* a, int* b, int n) {
// Return 1 if vectors a and b are equal, else return 0.
int i;
for (i=0; i<n; ++i) {
if (a[i] != b[i])
return 0;
}
return 1;
}
| f639e1218c1f89a23c5241a0554992ed874156d3.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
int are_vectors_equal(int* a, int* b, int n);
/* The old-fashioned CPU-only way to add two vectors */
void add_vectors_host(int *result, int *a, int *b, int n) {
for (int i=0; i<n; i++)
result[i] = a[i] + b[i];
}
/* The kernel that will execute on the GPU */
__global__ void add_vectors_kernel(int *result, int *a, int *b, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// If we have more threads than the magnitude of our vector, we need to
// make sure that the excess threads don't try to save results into
// unallocated memory.
if (idx < n)
result[idx] = a[idx] + b[idx];
}
/* This function encapsulates the process of creating and tearing down the
* environment used to execute our vector addition kernel. The steps of the
* process are:
* 1. Allocate memory on the device to hold our vectors
* 2. Copy the vectors to device memory
* 3. Execute the kernel
* 4. Retrieve the result vector from the device by copying it to the host
* 5. Free memory on the device
*/
void add_vectors_dev(int *result, int *a, int *b, int n) {
// Step 1: Allocate memory
int *a_dev, *b_dev, *result_dev;
// Since cudaMalloc does not return a pointer like C's traditional malloc
// (it returns a success status instead), we provide as its first argument
// the address of our device pointer variable so that it can change the
// value of our pointer to the correct device address.
cudaMalloc((void **) &a_dev, sizeof(int) * n);
cudaMalloc((void **) &b_dev, sizeof(int) * n);
cudaMalloc((void **) &result_dev, sizeof(int) * n);
// Step 2: Copy the input vectors to the device
cudaError_t err = cudaMemcpy(a_dev, a, sizeof(int) * n, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
printf("ERROR!!!!!!!!!!!!!!!!!!!!!!!!!!!");
cudaMemcpy(b_dev, b, sizeof(int) * n, cudaMemcpyHostToDevice);
// Step 3: Invoke the kernel
// We allocate enough blocks (each 512 threads long) in the grid to
// accommodate all `n` elements in the vectors. The 512 long block size
// is somewhat arbitrary, but with the constraint that we know the
// hardware will support blocks of that size.
dim3 dimGrid((n + 512 - 1) / 512, 1, 1);
dim3 dimBlock(512, 1, 1);
add_vectors_kernel<<<dimGrid, dimBlock>>>(result_dev, a_dev, b_dev, n);
// Step 4: Retrieve the results
cudaMemcpy(result, result_dev, sizeof(int) * n, cudaMemcpyDeviceToHost);
// Step 5: Free device memory
cudaFree(a_dev);
cudaFree(b_dev);
cudaFree(result_dev);
}
void print_vector(int *array, int n) {
int i;
for (i=0; i<n; i++)
printf("%d ", array[i]);
printf("\n");
}
int main(void) {
int n = 5; // Length of the arrays
int a[] = {0, 1, 2, 3, 4};
int b[] = {5, 6, 7, 8, 9};
int host_result[5];
int device_result[5];
int l, i;
int* rand_a, *rand_b, *rand_host_result, *rand_device_result;
clock_t start, stop;
double gpu_time, cpu_time;
printf("Please enter vector length: ");
scanf("%d", &l);
rand_a = (int*) malloc(sizeof(int)*l);
rand_b = (int*) malloc(sizeof(int)*l);
rand_host_result = (int*) malloc(sizeof(int)*l);
rand_device_result = (int*) malloc(sizeof(int)*l);
printf("The CPU's answer: ");
add_vectors_host(host_result, a, b, n);
print_vector(host_result, n);
printf("The GPU's answer: ");
add_vectors_dev(device_result, a, b, n);
print_vector(device_result, n);
printf("Generating vectors of length %d... \n", l);
for(i=0; i<l; ++i) {
rand_a[i] = rand() % 10;
rand_b[i] = rand() % 10;
//printf("%d: %d, %d \n", i, rand_a[i], rand_b[i]);
}
start = clock();
add_vectors_host(rand_host_result, rand_a, rand_b, l);
stop = clock();
cpu_time = (double) (stop-start)/CLOCKS_PER_SEC;
start = clock();
add_vectors_dev(rand_device_result, rand_a, rand_b, l);
stop = clock();
gpu_time = (double) (stop-start)/CLOCKS_PER_SEC;
//print_vector(rand_host_result, l);
printf("CPU compute time: %f", cpu_time);
printf("\n");
printf("GPU compute time: %f", gpu_time);
printf("\n");
printf("Ratio: %f", cpu_time / gpu_time);
printf("\n");
if(!are_vectors_equal(rand_host_result, rand_device_result, l)) {
printf("WARNING! Host and device results do not agree\n");
}
free(rand_a);
free(rand_b);
free(rand_host_result);
free(rand_device_result);
return 0;
}
int are_vectors_equal(int* a, int* b, int n) {
// Return 1 if vectors a and b are equal, else return 0.
int i;
for (i=0; i<n; ++i) {
if (a[i] != b[i])
return 0;
}
return 1;
}
|
52c2f9be6287ff9962a66f18335bf532ef252edf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ComputeDistanceKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
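// Benchmark sweep: for every (matrix size, block shape) pair above, the harness
// allocates buffers, does one launch plus 10 warm-up launches, then prints the
// wall-clock time of 1000 launches. Note that no device synchronization precedes
// the stop timestamp, so the figure may largely reflect launch overhead rather
// than full kernel execution time.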
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int inputSize = XSIZE*YSIZE;
float *distance = NULL;
hipMalloc(&distance, XSIZE*YSIZE*sizeof(float)); // allocation size is in bytes, not elements
float *dimensionWeight = NULL;
hipMalloc(&dimensionWeight, XSIZE*YSIZE*sizeof(float));
int maxCells = 1;
float *difference = NULL;
hipMalloc(&difference, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((ComputeDistanceKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, inputSize, distance, dimensionWeight, maxCells, difference);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((ComputeDistanceKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, inputSize, distance, dimensionWeight, maxCells, difference);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((ComputeDistanceKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, inputSize, distance, dimensionWeight, maxCells, difference);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 52c2f9be6287ff9962a66f18335bf532ef252edf.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ComputeDistanceKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int inputSize = XSIZE*YSIZE;
float *distance = NULL;
cudaMalloc(&distance, XSIZE*YSIZE*sizeof(float)); // allocation size is in bytes, not elements
float *dimensionWeight = NULL;
cudaMalloc(&dimensionWeight, XSIZE*YSIZE*sizeof(float));
int maxCells = 1;
float *difference = NULL;
cudaMalloc(&difference, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ComputeDistanceKernel<<<gridBlock,threadBlock>>>(inputSize,distance,dimensionWeight,maxCells,difference);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ComputeDistanceKernel<<<gridBlock,threadBlock>>>(inputSize,distance,dimensionWeight,maxCells,difference);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ComputeDistanceKernel<<<gridBlock,threadBlock>>>(inputSize,distance,dimensionWeight,maxCells,difference);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9b3941b524930dd751946f989fa15f41d7799443.hip | // !!! This is a file automatically generated by hipify!!!
#include "./c_runtime_api.h"
#include <cassert>
#include <cstdio>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 512
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
__global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol,
const float *input_a,
const float *input_b,
float *output) {
// Dynamic shared memory, size provided at kernel launch.
extern __shared__ float loss_per_row[];
// Two dimensional thread blocks.
int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x;
if (y >= nrow) {
return;
}
input_a += y * ncol;
input_b += y * ncol;
float maxval = *input_a;
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_a[x]);
}
// Subtract the row max before exponentiating (for numerical stability).
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_a[x] - maxval);
}
// Compute per-row loss.
float loss = 0;
for (int x = 0; x < ncol; ++x) {
loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum);
}
loss_per_row[y] = loss;
__syncthreads();
// Compute reduce_mean across rows.
float mean_loss = 0;
// Use a single thread to reduce mean across rows.
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
for (int i = 0; i < nrow; ++i) {
mean_loss += loss_per_row[i];
}
mean_loss /= nrow;
output[0] = mean_loss;
}
}
// arr, value
// arr[:] = value
__global__ void array_set(float *output, const float input, int64_t n)
{
int out_index = blockDim.x * blockIdx.x + threadIdx.x;
if (out_index < n) {
output[out_index] = input;
}
}
int DLGpuArraySet(DLArrayHandle arr, float value) { /* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
int threads = THREADS_PER_BLOCK;
int blocks = (int)((size + threads - 1) / threads); // cover every element for any size
hipLaunchKernelGGL(( array_set), dim3(blocks), dim3(threads), 0, 0, (float *)arr->data, value, size);
return 0;
}
// input, output
// output[:,] = input
__global__ void array_broadcast(float *output, const float *input, int64_t size, int new_dimension){
int id = threadIdx.x;
int stride = blockDim.x;
for (int i = id; i < new_dimension; i += stride) {
memcpy(output + i * size, input, sizeof(float) * size);
}
}
int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
int new_dimension = output->shape[0];
float *input_data = (float *)input->data;
float *output_data = (float *)output->data;
hipLaunchKernelGGL(( array_broadcast), dim3(1), dim3(1024), 0, 0, output_data, input_data, size, new_dimension);
return 0;
}
__global__ void value_add_keneral(float *input, float *output, int64_t size, int64_t rows)
{
int stride = blockDim.x;
int id = threadIdx.x;
for(int i= id; i<size; i+=stride){
float v = 0.0;
for (int j=0; j<rows; j++)
{
v += input[j*size+i];
}
output[i]=v;
}
}
// output = input.sum(axis=0)
int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t size =input->shape[1] * input->shape[2];
hipLaunchKernelGGL(( value_add_keneral), dim3(1), dim3(1024), 0, 0, (float *)input->data, (float *)output->data, size, input->shape[0]);
return 0;
}
__global__ void matAdd(float * A, float *B, float *C, int64_t size)
{
int begin = threadIdx.x;
int stride = blockDim.x;
for (int i=begin; i<size; i+=stride){
C[i] = A[i]+B[i];
}
}
// output = matA+matB
int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA,
const DLArrayHandle matB, DLArrayHandle output) {
/* TODO: Your code here */
// dim3 thread(matA.shape[0], matA.shape[1]);
int64_t size = 1;
for (int i = 0; i < matA->ndim; i++) {
size *= matA->shape[i];
}
hipLaunchKernelGGL(( matAdd), dim3(1), dim3(1024), 0, 0, (float *) matA->data, (float *) matB->data, (float *) output->data, size);
return 0;
}
__global__ void elementAdd(float *input, float value, float *output, int64_t size)
{
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<size; b+= stride){
output[b] = input[b]+value;
}
}
int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val,
DLArrayHandle output)
{ /* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
hipLaunchKernelGGL(( elementAdd), dim3(1), dim3(1024), 0, 0, (float *)input->data, val, (float *)output->data, size);
return 0;
}
__global__ void MatMultiply(float * A, float *B, float *C, int64_t size)
{
int begin = threadIdx.x;
int stride = blockDim.x;
for (int i=begin; i<size; i+=stride)
{
C[i] = A[i]*B[i];
}
}
int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA,
const DLArrayHandle matB,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < matA->ndim; i++) {
size *= matA->shape[i];
}
hipLaunchKernelGGL(( MatMultiply), dim3(1), dim3(1024), 0, 0, (float *)matA->data, (float *)matB->data, (float *)output->data, size);
return 0;
}
__global__ void elementMultiply(float *input, float value, float *output, int64_t size)
{
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<size; b+= stride){
output[b] = input[b]*value;
}
}
int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
hipLaunchKernelGGL(( elementMultiply), dim3(1), dim3(1024), 0, 0, (float *)input->data, val, (float *)output->data, size);
return 0;
}
int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA,
const DLArrayHandle matB, bool transposeB,
DLArrayHandle matC) {
/* TODO: Your code here */
// Hint: use cublas
// cublas assume matrix is column major
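  // Row-major trick: a row-major MxN matrix has the same memory layout as a
  // column-major NxM matrix, so C = op(A) * op(B) on row-major data is obtained
  // by asking BLAS for op(B) * op(A) in column-major terms, i.e. passing B first
  // and A second with the dimensions swapped, as the Sgemm call below does.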
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasOperation_t trans_a = HIPBLAS_OP_N, trans_b = HIPBLAS_OP_N;
int m = matC->shape[0], k = matA->shape[1], n = matC->shape[1];
const float alpha = 1.0;
const float beta = 0.0;
if (transposeA) {
trans_a = HIPBLAS_OP_T;
k = matA->shape[0];
}
if (transposeB) {
trans_b = HIPBLAS_OP_T;
}
hipblasSgemm(handle, trans_b, trans_a,
n, m, k, &alpha,
(const float *)matB->data, transposeB ? k : n,
(const float *)matA->data, transposeA ? m : k,
&beta, (float *)matC->data, n);
hipblasDestroy(handle); // release the per-call handle created above
return 0;
}
__global__ void relu(float *input, float *output, int64_t size) {
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<size; b+= stride){
output[b] = max(input[b], 0.0f);
}
}
// relu(x) = max(x, 0)
int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
hipLaunchKernelGGL(( relu), dim3(1), dim3(1024), 0, 0, (float *)input->data, (float *)output->data, size);
return 0;
}
__global__ void relu_gradient(float *input, float *in_grad, float *output, int64_t size){
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<size; b+= stride){
output[b] = input[b] > 0 ? in_grad[b]: 0.0;
}
}
// relu gradient: pass in_grad through where input > 0, otherwise 0
int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
hipLaunchKernelGGL(( relu_gradient), dim3(1), dim3(1024), 0, 0, (float *)input->data, (float *) in_grad->data, (float *)output->data, size);
return 0;
}
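// Note: unlike matrix_softmax_cross_entropy_kernel above, this softmax does not
// subtract the per-row maximum before exponentiating, so exp() can overflow for
// large inputs.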
__global__ void softmax(float *input, float *output, int nrow, int ncol) {
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<nrow; b+= stride){
float sum = 0;
for (int m=0; m<ncol; m++)
{
sum += exp(input[b*ncol+m]);
}
for (int m=0; m<ncol; m++) {
output[b*ncol+m] = exp(input[b*ncol+m])/sum;
}
}
}
// e^x[0]/sum(e^x[i])
int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t nrow = input->shape[0];
int64_t ncol = input->shape[1];
hipLaunchKernelGGL(( softmax), dim3(1), dim3(1024), 0, 0, (float *) input->data, (float *) output->data, nrow, ncol);
return 0;
}
int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a,
const DLArrayHandle input_b,
DLArrayHandle output) {
assert(input_a->ndim == 2);
assert(input_b->ndim == 2);
assert(output->ndim == 1);
assert(input_a->shape[0] == input_b->shape[0] &&
input_a->shape[1] == input_b->shape[1]);
int nrow = input_a->shape[0];
// Maximum x- or y-dimension of a block = 1024
// But we need 'nrow' shared memory, and max shared memory is 48KB.
// Conservatively allow max 16KB shared memory.
assert(nrow <= 1024 * 4);
int ncol = input_a->shape[1];
const float *input_data_a = (const float *)input_a->data;
const float *input_data_b = (const float *)input_b->data;
float *output_data = (float *)output->data;
dim3 threads;
if (nrow <= 1024) {
threads.x = nrow;
} else {
threads.x = 1024;
threads.y = (nrow + 1023) / 1024;
}
// 1 block, each block with 'threads' number of threads with 'nrow' shared
// memory size
hipLaunchKernelGGL(( matrix_softmax_cross_entropy_kernel), dim3(1), dim3(threads), nrow * sizeof(float), 0,
nrow, ncol, input_data_a, input_data_b, output_data);
return 0;
}
| 9b3941b524930dd751946f989fa15f41d7799443.cu | #include "./c_runtime_api.h"
#include <cassert>
#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 512
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
__global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol,
const float *input_a,
const float *input_b,
float *output) {
// Dynamic shared memory, size provided at kernel launch.
extern __shared__ float loss_per_row[];
// Two dimensional thread blocks.
int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x +
threadIdx.x;
if (y >= nrow) {
return;
}
input_a += y * ncol;
input_b += y * ncol;
float maxval = *input_a;
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_a[x]);
}
// Subtract the row max before exponentiating (for numerical stability).
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_a[x] - maxval);
}
// Compute per-row loss.
float loss = 0;
for (int x = 0; x < ncol; ++x) {
loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum);
}
loss_per_row[y] = loss;
__syncthreads();
// Compute reduce_mean across rows.
float mean_loss = 0;
// Use a single thread to reduce mean across rows.
if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
for (int i = 0; i < nrow; ++i) {
mean_loss += loss_per_row[i];
}
mean_loss /= nrow;
output[0] = mean_loss;
}
}
// arr, value
// arr[:] = value
__global__ void array_set(float *output, const float input, int64_t n)
{
int out_index = blockDim.x * blockIdx.x + threadIdx.x;
if (out_index < n) {
output[out_index] = input;
}
}
int DLGpuArraySet(DLArrayHandle arr, float value) { /* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < arr->ndim; i++) {
size *= arr->shape[i];
}
int threads = THREADS_PER_BLOCK;
int blocks = (int)((size + threads - 1) / threads); // cover every element for any size
array_set<<<blocks, threads>>>((float *)arr->data, value, size);
return 0;
}
// input, output
// output[:,] = input
__global__ void array_broadcast(float *output, const float *input, int64_t size, int new_dimension){
int id = threadIdx.x;
int stride = blockDim.x;
for (int i = id; i < new_dimension; i += stride) {
memcpy(output + i * size, input, sizeof(float) * size);
}
}
int DLGpuBroadcastTo(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
int new_dimension = output->shape[0];
float *input_data = (float *)input->data;
float *output_data = (float *)output->data;
array_broadcast<<<1, 1024>>>(output_data, input_data, size, new_dimension);
return 0;
}
__global__ void value_add_keneral(float *input, float *output, int64_t size, int64_t rows)
{
int stride = blockDim.x;
int id = threadIdx.x;
for(int i= id; i<size; i+=stride){
float v = 0.0;
for (int j=0; j<rows; j++)
{
v += input[j*size+i];
}
output[i]=v;
}
}
// output = input.sum(axis=0)
int DLGpuReduceSumAxisZero(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t size =input->shape[1] * input->shape[2];
value_add_keneral<<<1, 1024>>>((float *)input->data, (float *)output->data, size, input->shape[0]);
return 0;
}
__global__ void matAdd(float * A, float *B, float *C, int64_t size)
{
int begin = threadIdx.x;
int stride = blockDim.x;
for (int i=begin; i<size; i+=stride){
C[i] = A[i]+B[i];
}
}
// output = matA+matB
int DLGpuMatrixElementwiseAdd(const DLArrayHandle matA,
const DLArrayHandle matB, DLArrayHandle output) {
/* TODO: Your code here */
// dim3 thread(matA.shape[0], matA.shape[1]);
int64_t size = 1;
for (int i = 0; i < matA->ndim; i++) {
size *= matA->shape[i];
}
matAdd<<<1, 1024>>>((float *) matA->data, (float *) matB->data, (float *) output->data, size);
return 0;
}
__global__ void elementAdd(float *input, float value, float *output, int64_t size)
{
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<size; b+= stride){
output[b] = input[b]+value;
}
}
int DLGpuMatrixElementwiseAddByConst(const DLArrayHandle input, float val,
DLArrayHandle output)
{ /* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
elementAdd<<<1, 1024>>>((float *)input->data, val, (float *)output->data, size);
return 0;
}
__global__ void MatMultiply(float * A, float *B, float *C, int64_t size)
{
int begin = threadIdx.x;
int stride = blockDim.x;
for (int i=begin; i<size; i+=stride)
{
C[i] = A[i]*B[i];
}
}
int DLGpuMatrixElementwiseMultiply(const DLArrayHandle matA,
const DLArrayHandle matB,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < matA->ndim; i++) {
size *= matA->shape[i];
}
MatMultiply<<<1, 1024>>>((float *)matA->data, (float *)matB->data, (float *)output->data, size);
return 0;
}
__global__ void elementMultiply(float *input, float value, float *output, int64_t size)
{
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<size; b+= stride){
output[b] = input[b]*value;
}
}
int DLGpuMatrixMultiplyByConst(const DLArrayHandle input, float val,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
elementMultiply<<<1, 1024>>>((float *)input->data, val, (float *)output->data, size);
return 0;
}
int DLGpuMatrixMultiply(const DLArrayHandle matA, bool transposeA,
const DLArrayHandle matB, bool transposeB,
DLArrayHandle matC) {
/* TODO: Your code here */
// Hint: use cublas
// cublas assume matrix is column major
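  // Row-major trick: a row-major MxN matrix has the same memory layout as a
  // column-major NxM matrix, so C = op(A) * op(B) on row-major data is obtained
  // by asking BLAS for op(B) * op(A) in column-major terms, i.e. passing B first
  // and A second with the dimensions swapped, as the Sgemm call below does.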
cublasHandle_t handle;
cublasCreate(&handle);
cublasOperation_t trans_a = CUBLAS_OP_N, trans_b = CUBLAS_OP_N;
int m = matC->shape[0], k = matA->shape[1], n = matC->shape[1];
const float alpha = 1.0;
const float beta = 0.0;
if (transposeA) {
trans_a = CUBLAS_OP_T;
k = matA->shape[0];
}
if (transposeB) {
trans_b = CUBLAS_OP_T;
}
cublasSgemm(handle, trans_b, trans_a,
n, m, k, &alpha,
(const float *)matB->data, transposeB ? k : n,
(const float *)matA->data, transposeA ? m : k,
&beta, (float *)matC->data, n);
cublasDestroy(handle); // release the per-call handle created above
return 0;
}
__global__ void relu(float *input, float *output, int64_t size) {
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<size; b+= stride){
output[b] = max(input[b], 0.0f);
}
}
// relu(x) = max(x, 0)
int DLGpuRelu(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
relu<<<1, 1024>>>((float *)input->data, (float *)output->data, size);
return 0;
}
__global__ void relu_gradient(float *input, float *in_grad, float *output, int64_t size){
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<size; b+= stride){
output[b] = input[b] > 0 ? in_grad[b]: 0.0;
}
}
// relu gradient: pass in_grad through where input > 0, otherwise 0
int DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad,
DLArrayHandle output) {
/* TODO: Your code here */
int64_t size = 1;
for (int i = 0; i < input->ndim; i++) {
size *= input->shape[i];
}
relu_gradient<<<1, 1024>>>((float *)input->data, (float *) in_grad->data, (float *)output->data, size);
return 0;
}
__global__ void softmax(float *input, float *output, int nrow, int ncol) {
int i = threadIdx.x;
int stride = blockDim.x;
for (int b=i; b<nrow; b+= stride){
float sum = 0;
for (int m=0; m<ncol; m++)
{
sum += exp(input[b*ncol+m]);
}
for (int m=0; m<ncol; m++) {
output[b*ncol+m] = exp(input[b*ncol+m])/sum;
}
}
}
// e^x[0]/sum(e^x[i])
int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output) {
/* TODO: Your code here */
int64_t nrow = input->shape[0];
int64_t ncol = input->shape[1];
softmax<<<1, 1024>>>((float *) input->data, (float *) output->data, nrow, ncol);
return 0;
}
int DLGpuSoftmaxCrossEntropy(const DLArrayHandle input_a,
const DLArrayHandle input_b,
DLArrayHandle output) {
assert(input_a->ndim == 2);
assert(input_b->ndim == 2);
assert(output->ndim == 1);
assert(input_a->shape[0] == input_b->shape[0] &&
input_a->shape[1] == input_b->shape[1]);
int nrow = input_a->shape[0];
// Maximum x- or y-dimension of a block = 1024
// But we need 'nrow' shared memory, and max shared memory is 48KB.
// Conservatively allow max 16KB shared memory.
assert(nrow <= 1024 * 4);
int ncol = input_a->shape[1];
const float *input_data_a = (const float *)input_a->data;
const float *input_data_b = (const float *)input_b->data;
float *output_data = (float *)output->data;
dim3 threads;
if (nrow <= 1024) {
threads.x = nrow;
} else {
threads.x = 1024;
threads.y = (nrow + 1023) / 1024;
}
// 1 block, each block with 'threads' number of threads with 'nrow' shared
// memory size
matrix_softmax_cross_entropy_kernel<<<1, threads, nrow * sizeof(float)>>>(
nrow, ncol, input_data_a, input_data_b, output_data);
return 0;
}
|
5f172721542be0848d1cf92dce9e7574f5874e48.hip | // !!! This is a file automatically generated by hipify!!!
#include "target_fn.h"
#include <hip/hip_runtime_api.h>
#include "../cpu/MatrixCover.h"
#include "../gpu-mg/MatrixCoverGPU.cuh"
// #include "../gpu/MatrixCoverGPU.cuh"
#define THRESHOLD 100
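// THRESHOLD is the hard-conflict limit handed to both the CPU solver and the
// multi-graph GPU solver below.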
MeasureTimer Invoke_ORIGINAL_CPU(DataSet *dataset, bool print_result) {
MeasureTimer timer;
int total_col = dataset->total_dl_matrix_col_num;
int total_row = dataset->total_dl_matrix_row_num;
std::vector<int> results(total_row, 0);
std::vector<int> deleted_cols(total_col, 0);
int **dl_matrix = new int *[total_row];
int **next_row = new int *[total_row];
for (int i = 0; i < total_row; i++) {
dl_matrix[i] = new int[total_col];
next_row[i] = new int[total_col];
for (int j = 0; j < total_col; j++) {
dl_matrix[i][j] = dataset->dl_matrix[i * total_col + j];
next_row[i][j] = dataset->next_row[i * total_col + j];
}
}
int hard_conflict_threshold = THRESHOLD;
timer.StartCoreTime();
mc_solver(dl_matrix, next_row, results.data(), deleted_cols.data(),
dataset->col_group.data(), dataset->vertex_num, total_row,
total_col, hard_conflict_threshold);
timer.EndCoreTime();
dataset->final_result.clear();
for (int i = 0; i < total_row; i++) {
if (results[i] > 0) {
if (i + 1 > 3 * dataset->vertex_num) {
dataset->final_result.push_back(i + 2);
} else {
dataset->final_result.push_back(i + 1);
}
}
}
if (print_result) {
int conflict_count = 0;
for (int i = 0; i < total_row; i++) {
std::cout << results[i] << ' ';
}
std::cout << std::endl;
for (int i = 0; i < total_row; i++) {
if (results[i] > 0) {
std::cout << i << ' ';
}
}
std::cout << std::endl;
for (int i = 0; i < total_col; i++) {
if (deleted_cols[i] == -1) {
conflict_count++;
}
}
std::cout << "Conflict Num is " << conflict_count / 3 << std::endl;
}
for (int i = 0; i < total_row; i++) {
delete[] dl_matrix[i];
delete[] next_row[i];
}
delete[] dl_matrix;
delete[] next_row;
return timer;
}
// MeasureTimer Invoke_ORIGINAL_GPU(DataSet *dataset, bool print_result) {
// MeasureTimer timer;
// timer.StartDataLoadTime();
// int total_col = dataset->total_dl_matrix_col_num;
// int total_row = dataset->total_dl_matrix_row_num;
// std::vector<int> deleted_cols(total_col, 0);
// std::vector<int> deleted_rows(total_row, 0);
// int thread_size = 32;
// int conflict_count = 0;
// int vertex_num = dataset->vertex_num;
// int vertex_num_gpu = vertex_num;
// int total_dl_matrix_row_num = total_row;
// int total_dl_matrix_col_num = total_col;
// int total_dl_matrix_row_num_gpu = total_row;
// int total_dl_matrix_col_num_gpu = total_col;
// // allocate necessary vectors and matrix on GPU
// int *dl_matrix_gpu;
// int *deleted_cols_gpu;
// int *col_group_gpu;
// int *results_gpu;
// int *conflict_count_gpu;
// int *deleted_rows_gpu;
// int *row_group_gpu;
// // dl_matrix_gpu = new int *[total_dl_matrix_row_num];
// hipMalloc(&dl_matrix_gpu,
// sizeof(int) * total_dl_matrix_row_num *
// total_dl_matrix_col_num);
// hipMemcpy(dl_matrix_gpu, dataset->dl_matrix.data(),
// sizeof(int) * total_dl_matrix_row_num * total_dl_matrix_col_num,
// hipMemcpyHostToDevice);
// hipMalloc(&deleted_cols_gpu, sizeof(int) * total_dl_matrix_col_num);
// hipMalloc(&col_group_gpu, sizeof(int) * total_dl_matrix_col_num);
// hipMalloc(&results_gpu, sizeof(int) * total_dl_matrix_row_num);
// hipMalloc(&conflict_count_gpu, sizeof(int) * total_dl_matrix_col_num);
// hipMalloc(&deleted_rows_gpu, sizeof(int) * total_dl_matrix_row_num);
// hipMalloc(&row_group_gpu, sizeof(int) * total_dl_matrix_row_num);
// std::vector<int> row_group(total_dl_matrix_row_num_gpu, 0);
// // get col and row group
// gpu::init_vectors<<<1, thread_size>>>(row_group_gpu,
// total_dl_matrix_row_num_gpu);
// hipMemcpy(row_group.data(), row_group_gpu,
// sizeof(int) * total_dl_matrix_row_num_gpu,
// hipMemcpyDeviceToHost);
// if (print_result) {
// std::cout << "print row group" << std::endl;
// for (int i = 0; i < total_dl_matrix_row_num; i++) {
// std::cout << row_group[i] << ' ';
// }
// std::cout << std::endl;
// }
// gpu::get_vertex_row_group<<<1, thread_size>>>(
// row_group_gpu, dl_matrix_gpu, vertex_num_gpu,
// total_dl_matrix_row_num_gpu,
// total_dl_matrix_col_num_gpu);
// hipMemcpy(row_group.data(), row_group_gpu,
// sizeof(int) * total_dl_matrix_row_num_gpu,
// hipMemcpyDeviceToHost);
// if (print_result) {
// std::cout << "print row group" << std::endl;
// for (int i = 0; i < total_dl_matrix_row_num; i++) {
// std::cout << row_group[i] << ' ';
// }
// std::cout << std::endl;
// }
// hipMemcpy(col_group_gpu, dataset->col_group.data(),
// sizeof(int) * total_dl_matrix_col_num, hipMemcpyHostToDevice);
// timer.EndDataLoadTime();
// timer.StartCoreTime();
// hipProfilerStart();
// gpu::mc_solver(dl_matrix_gpu, results_gpu, deleted_cols_gpu,
// deleted_rows_gpu,
// col_group_gpu, row_group_gpu, conflict_count_gpu,
// vertex_num_gpu, total_dl_matrix_row_num_gpu,
// total_dl_matrix_col_num_gpu);
// hipDeviceSynchronize();
// hipProfilerStop();
// timer.EndCoreTime();
// std::vector<int> results(total_dl_matrix_row_num);
// hipMemcpy(results.data(), results_gpu, sizeof(int) *
// total_dl_matrix_row_num,
// hipMemcpyDeviceToHost);
// hipMemcpy(deleted_cols.data(), deleted_cols_gpu,
// sizeof(int) * total_dl_matrix_col_num, hipMemcpyDeviceToHost);
// dataset->final_result.clear();
// for (int i = 0; i < total_row; i++) {
// if (results[i] > 0) {
// // std::cout<<"debug"<<dataset->final_result.empty()<<std::endl;
// if (i + 1 > 3 * dataset->vertex_num) {
// dataset->final_result.push_back(i + 2);
// } else {
// dataset->final_result.push_back(i + 1);
// }
// }
// }
// if (print_result) {
// for (int i = 0; i < total_dl_matrix_row_num; i++) {
// std::cout << results[i] << ' ';
// }
// std::cout << std::endl;
// for (int i = 0; i < total_dl_matrix_row_num; i++) {
// if (results[i] > 0) {
// std::cout << i << ' ';
// }
// }
// std::cout << std::endl;
// for (int i = 0; i < total_dl_matrix_col_num; i++) {
// if (deleted_cols[i] == -1) {
// conflict_count++;
// }
// }
// std::cout << "Conflict Num is " << conflict_count / 3 << std::endl;
// }
// hipFree(dl_matrix_gpu);
// hipFree(deleted_cols_gpu);
// hipFree(col_group_gpu);
// hipFree(results_gpu);
// hipFree(conflict_count_gpu);
// hipFree(deleted_rows_gpu);
// hipFree(row_group_gpu);
// return timer;
// }
MeasureTimer Invoke_ORIGINAL_GPU_MG(DataSets *datasets, bool print_result) {
MeasureTimer timer;
int total_row = 0, total_col = 0;
int n = datasets->graph_count;
for (int i = 0; i < n; ++i) {
total_row += datasets->total_dl_matrix_row_num[i];
total_col += datasets->total_dl_matrix_col_num[i];
}
int total_matrix = datasets->dl_matrix.size();
std::vector<int> deleted_cols(total_col, 0);
std::vector<int> deleted_rows(total_row, 0);
std::vector<int> conflict_count(n, 0);
bool *bool_dl_matrix = new bool[datasets->bool_dl_matrix.size()];
bool *bool_transpose_dl_matrix =
new bool[datasets->bool_transpose_dl_matrix.size()];
for (int i = 0; i < datasets->bool_dl_matrix.size(); ++i) {
bool_dl_matrix[i] = datasets->bool_dl_matrix[i];
bool_transpose_dl_matrix[i] = datasets->bool_transpose_dl_matrix[i];
}
timer.StartDataLoadTime();
bool *dl_matrix_gpu;
bool *transpose_dl_matrix_gpu;
int *next_col_gpu;
int *next_row_gpu;
int *results_gpu;
int *conflict_edge_gpu;
// hipMalloc(&dl_matrix_gpu, sizeof(int) * total_matrix);
// hipMalloc(&transpose_dl_matrix_gpu, sizeof(int) * total_matrix);
hipMalloc(&dl_matrix_gpu, sizeof(bool) * total_matrix);
hipMalloc(&transpose_dl_matrix_gpu, sizeof(bool) * total_matrix);
// hipMalloc(&conflict_edge_gpu, sizeof(int) * 2 * n);
hipMalloc(&next_col_gpu, sizeof(int) * total_matrix);
hipMalloc(&next_row_gpu, sizeof(int) * total_matrix);
// hipMemcpy(dl_matrix_gpu, datasets->dl_matrix.data(),
// sizeof(int) * total_matrix, hipMemcpyHostToDevice);
// hipMemcpy(transpose_dl_matrix_gpu, datasets->transpose_dl_matrix.data(),
// sizeof(int) * total_matrix, hipMemcpyHostToDevice);
hipMemcpy(dl_matrix_gpu, bool_dl_matrix, sizeof(bool) * total_matrix,
hipMemcpyHostToDevice);
hipMemcpy(transpose_dl_matrix_gpu, bool_transpose_dl_matrix,
sizeof(bool) * total_matrix, hipMemcpyHostToDevice);
hipMemcpy(next_col_gpu, datasets->next_col.data(),
sizeof(int) * total_matrix, hipMemcpyHostToDevice);
hipMemcpy(next_row_gpu, datasets->next_row.data(),
sizeof(int) * total_matrix, hipMemcpyHostToDevice);
hipMalloc(&results_gpu, sizeof(int) * total_row);
int *deleted_cols_gpu;
int *deleted_rows_gpu;
int *col_group_gpu;
int *row_group_gpu;
int *conflict_count_gpu;
// hipMalloc(&deleted_cols_gpu, sizeof(int) * total_col);
// hipMalloc(&deleted_rows_gpu, sizeof(int) * total_row);
hipMalloc(&col_group_gpu, sizeof(int) * total_col);
hipMalloc(&row_group_gpu, sizeof(int) * total_row);
// hipMalloc(&conflict_count_gpu, sizeof(int) * total_col);
hipMemcpy(col_group_gpu, datasets->col_group.data(), sizeof(int) * total_col,
hipMemcpyHostToDevice);
int *vertex_num_gpu;
int *total_dl_matrix_col_num_gpu;
int *total_dl_matrix_row_num_gpu;
hipMalloc(&vertex_num_gpu, sizeof(int) * n);
hipMalloc(&total_dl_matrix_col_num_gpu, sizeof(int) * n);
hipMalloc(&total_dl_matrix_row_num_gpu, sizeof(int) * n);
hipMemcpy(vertex_num_gpu, datasets->vertex_num.data(), sizeof(int) * n,
hipMemcpyHostToDevice);
hipMemcpy(total_dl_matrix_col_num_gpu,
datasets->total_dl_matrix_col_num.data(), sizeof(int) * n,
hipMemcpyHostToDevice);
hipMemcpy(total_dl_matrix_row_num_gpu,
datasets->total_dl_matrix_row_num.data(), sizeof(int) * n,
hipMemcpyHostToDevice);
int *offset_col_gpu;
int *offset_row_gpu;
int *offset_matrix_gpu;
int *max_gpu;
hipMalloc(&offset_col_gpu, sizeof(int) * n);
hipMalloc(&offset_row_gpu, sizeof(int) * n);
hipMalloc(&offset_matrix_gpu, sizeof(int) * n);
// hipMalloc(&max_gpu, sizeof(int) * n);
hipMemcpy(offset_col_gpu, datasets->offset_col.data(), sizeof(int) * n,
hipMemcpyHostToDevice);
hipMemcpy(offset_row_gpu, datasets->offset_row.data(), sizeof(int) * n,
hipMemcpyHostToDevice);
hipMemcpy(offset_matrix_gpu, datasets->offset_matrix.data(), sizeof(int) * n,
hipMemcpyHostToDevice);
int *search_depth_gpu;
int *selected_row_id_gpu;
int *current_conflict_count_gpu;
int *conflict_node_id_gpu;
int *conflict_col_id_gpu;
int *existance_of_candidate_rows_gpu;
// hipMalloc(&search_depth_gpu, sizeof(int) * n);
// hipMalloc(&selected_row_id_gpu, sizeof(int) * n);
// hipMalloc(&current_conflict_count_gpu, sizeof(int) * n);
// hipMalloc(&conflict_node_id_gpu, sizeof(int) * n);
// hipMalloc(&conflict_col_id_gpu, sizeof(int) * n);
// hipMalloc(&existance_of_candidate_rows_gpu, sizeof(int) * n);
timer.EndDataLoadTime();
int hard_conflict_threshold = THRESHOLD;
int graph_per_block = 1;
int thread_count = 32;
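// 2-D thread blocks: 32 threads (one warp) in x per graph, graph_per_block graphs stacked in y.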
dim3 thread_size(thread_count, graph_per_block);
hipDeviceSynchronize();
hipLaunchKernelGGL(( gpu_mg::init_vertex_group), dim3(n), dim3(32), 0, 0,
row_group_gpu, dl_matrix_gpu, vertex_num_gpu, total_dl_matrix_col_num_gpu,
total_dl_matrix_row_num_gpu, offset_row_gpu, offset_matrix_gpu, n);
hipDeviceSynchronize();
timer.StartCoreTime();
hipProfilerStart();
hipLaunchKernelGGL(( gpu_mg::mc_solver), dim3(n / graph_per_block + 1), dim3(thread_size), 0, 0,
dl_matrix_gpu, transpose_dl_matrix_gpu, next_col_gpu, next_row_gpu,
results_gpu, deleted_cols_gpu, deleted_rows_gpu, col_group_gpu,
row_group_gpu, conflict_count_gpu, vertex_num_gpu,
total_dl_matrix_row_num_gpu, total_dl_matrix_col_num_gpu, offset_col_gpu,
offset_row_gpu, offset_matrix_gpu, search_depth_gpu, selected_row_id_gpu,
current_conflict_count_gpu, conflict_node_id_gpu, conflict_col_id_gpu,
existance_of_candidate_rows_gpu, conflict_edge_gpu, max_gpu, n,
hard_conflict_threshold, graph_per_block);
hipDeviceSynchronize();
hipProfilerStop();
timer.EndCoreTime();
std::vector<int> results(total_row, 0);
hipMemcpy(results.data(), results_gpu, sizeof(int) * total_row,
hipMemcpyDeviceToHost);
datasets->final_result.clear();
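// Decode selected rows per graph into 1-based ids; rows whose 1-based index exceeds 3*vertex_num get one extra offset.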
for (int k = 0; k < n; k++) {
for (int i = 0; i < datasets->total_dl_matrix_row_num[k]; i++) {
if (results[datasets->offset_row[k] + i] > 0) {
if (i + 1 > 3 * datasets->vertex_num[k]) {
datasets->final_result.push_back(i + 2);
} else {
datasets->final_result.push_back(i + 1);
}
}
}
}
// /*
if (print_result) {
hipMemcpy(deleted_cols.data(), deleted_cols_gpu, sizeof(int) * total_col,
hipMemcpyDeviceToHost);
for (int k = 0; k < n; k++) {
// for (int i = 0; i < datasets->total_dl_matrix_row_num[k]; i++) {
// std::cout << results[datasets->offset_row[k] + i] << ' ';
// }
// std::cout << std::endl;
// for (int i = 0; i < datasets->total_dl_matrix_row_num[k]; i++) {
// if (results[datasets->offset_row[k] + i] > 0) {
// std::cout << i << ' ';
// }
// }
// std::cout << std::endl;
for (int i = 0; i < datasets->total_dl_matrix_col_num[k]; i++) {
if (deleted_cols[datasets->offset_col[k] + i] == -1) {
conflict_count[k]++;
}
}
// 3 is the number of colors
if (conflict_count[k] > 0){
std::cout << "Conflict Num is " << conflict_count[k] / 3 << std::endl;
}
}
}
// */
hipFree(dl_matrix_gpu);
hipFree(next_col_gpu);
hipFree(next_row_gpu);
hipFree(results_gpu);
// hipFree(deleted_cols_gpu);
// hipFree(deleted_rows_gpu);
hipFree(col_group_gpu);
hipFree(row_group_gpu);
// hipFree(conflict_count_gpu);
// hipFree(max_gpu);
hipFree(vertex_num_gpu);
hipFree(total_dl_matrix_col_num_gpu);
hipFree(total_dl_matrix_row_num_gpu);
hipFree(offset_col_gpu);
hipFree(offset_row_gpu);
hipFree(offset_matrix_gpu);
// hipFree(search_depth_gpu);
// hipFree(selected_row_id_gpu);
// hipFree(current_conflict_count_gpu);
// hipFree(conflict_col_id_gpu);
// hipFree(conflict_node_id_gpu);
// hipFree(existance_of_candidate_rows_gpu);
// hipFree(conflict_edge_gpu);
delete[] bool_dl_matrix;
delete[] bool_transpose_dl_matrix;
hipDeviceReset();
return timer;
}
MeasureTimer Invoke(const ImplVersion version, bool print_result,
DataSet *dataset) {
MeasureTimer default_timer;
switch (version) {
case ImplVersion::ORIGINAL_CPU:
return Invoke_ORIGINAL_CPU(dataset, print_result);
case ImplVersion::ORIGINAL_GPU:
// return Invoke_ORIGINAL_GPU(dataset, print_result);
default:
std::cout << "Not Impl yet" << std::endl;
return default_timer;
}
}
MeasureTimer Invoke(const ImplVersion version, bool print_result,
DataSets *datasets) {
MeasureTimer default_timer;
switch (version) {
case ImplVersion::ORIGINAL_GPU_MG:
return Invoke_ORIGINAL_GPU_MG(datasets, print_result);
default:
std::cout << "Not Impl yet" << std::endl;
return default_timer;
}
}
| 5f172721542be0848d1cf92dce9e7574f5874e48.cu | #include "target_fn.h"
#include <cuda_profiler_api.h>
#include "../cpu/MatrixCover.h"
#include "../gpu-mg/MatrixCoverGPU.cuh"
// #include "../gpu/MatrixCoverGPU.cuh"
#define THRESHOLD 100
MeasureTimer Invoke_ORIGINAL_CPU(DataSet *dataset, bool print_result) {
MeasureTimer timer;
int total_col = dataset->total_dl_matrix_col_num;
int total_row = dataset->total_dl_matrix_row_num;
std::vector<int> results(total_row, 0);
std::vector<int> deleted_cols(total_col, 0);
int **dl_matrix = new int *[total_row];
int **next_row = new int *[total_row];
for (int i = 0; i < total_row; i++) {
dl_matrix[i] = new int[total_col];
next_row[i] = new int[total_col];
for (int j = 0; j < total_col; j++) {
dl_matrix[i][j] = dataset->dl_matrix[i * total_col + j];
next_row[i][j] = dataset->next_row[i * total_col + j];
}
}
int hard_conflict_threshold = THRESHOLD;
timer.StartCoreTime();
mc_solver(dl_matrix, next_row, results.data(), deleted_cols.data(),
dataset->col_group.data(), dataset->vertex_num, total_row,
total_col, hard_conflict_threshold);
timer.EndCoreTime();
dataset->final_result.clear();
for (int i = 0; i < total_row; i++) {
if (results[i] > 0) {
if (i + 1 > 3 * dataset->vertex_num) {
dataset->final_result.push_back(i + 2);
} else {
dataset->final_result.push_back(i + 1);
}
}
}
if (print_result) {
int conflict_count = 0;
for (int i = 0; i < total_row; i++) {
std::cout << results[i] << ' ';
}
std::cout << std::endl;
for (int i = 0; i < total_row; i++) {
if (results[i] > 0) {
std::cout << i << ' ';
}
}
std::cout << std::endl;
for (int i = 0; i < total_col; i++) {
if (deleted_cols[i] == -1) {
conflict_count++;
}
}
std::cout << "Conflict Num is " << conflict_count / 3 << std::endl;
}
for (int i = 0; i < total_row; i++) {
delete[] dl_matrix[i];
delete[] next_row[i];
}
delete[] dl_matrix;
delete[] next_row;
return timer;
}
// MeasureTimer Invoke_ORIGINAL_GPU(DataSet *dataset, bool print_result) {
// MeasureTimer timer;
// timer.StartDataLoadTime();
// int total_col = dataset->total_dl_matrix_col_num;
// int total_row = dataset->total_dl_matrix_row_num;
// std::vector<int> deleted_cols(total_col, 0);
// std::vector<int> deleted_rows(total_row, 0);
// int thread_size = 32;
// int conflict_count = 0;
// int vertex_num = dataset->vertex_num;
// int vertex_num_gpu = vertex_num;
// int total_dl_matrix_row_num = total_row;
// int total_dl_matrix_col_num = total_col;
// int total_dl_matrix_row_num_gpu = total_row;
// int total_dl_matrix_col_num_gpu = total_col;
// // allocate necessary vectors and matrix on GPU
// int *dl_matrix_gpu;
// int *deleted_cols_gpu;
// int *col_group_gpu;
// int *results_gpu;
// int *conflict_count_gpu;
// int *deleted_rows_gpu;
// int *row_group_gpu;
// // dl_matrix_gpu = new int *[total_dl_matrix_row_num];
// cudaMalloc(&dl_matrix_gpu,
// sizeof(int) * total_dl_matrix_row_num *
// total_dl_matrix_col_num);
// cudaMemcpy(dl_matrix_gpu, dataset->dl_matrix.data(),
// sizeof(int) * total_dl_matrix_row_num * total_dl_matrix_col_num,
// cudaMemcpyHostToDevice);
// cudaMalloc(&deleted_cols_gpu, sizeof(int) * total_dl_matrix_col_num);
// cudaMalloc(&col_group_gpu, sizeof(int) * total_dl_matrix_col_num);
// cudaMalloc(&results_gpu, sizeof(int) * total_dl_matrix_row_num);
// cudaMalloc(&conflict_count_gpu, sizeof(int) * total_dl_matrix_col_num);
// cudaMalloc(&deleted_rows_gpu, sizeof(int) * total_dl_matrix_row_num);
// cudaMalloc(&row_group_gpu, sizeof(int) * total_dl_matrix_row_num);
// std::vector<int> row_group(total_dl_matrix_row_num_gpu, 0);
// // get col and row group
// gpu::init_vectors<<<1, thread_size>>>(row_group_gpu,
// total_dl_matrix_row_num_gpu);
// cudaMemcpy(row_group.data(), row_group_gpu,
// sizeof(int) * total_dl_matrix_row_num_gpu,
// cudaMemcpyDeviceToHost);
// if (print_result) {
// std::cout << "print row group" << std::endl;
// for (int i = 0; i < total_dl_matrix_row_num; i++) {
// std::cout << row_group[i] << ' ';
// }
// std::cout << std::endl;
// }
// gpu::get_vertex_row_group<<<1, thread_size>>>(
// row_group_gpu, dl_matrix_gpu, vertex_num_gpu,
// total_dl_matrix_row_num_gpu,
// total_dl_matrix_col_num_gpu);
// cudaMemcpy(row_group.data(), row_group_gpu,
// sizeof(int) * total_dl_matrix_row_num_gpu,
// cudaMemcpyDeviceToHost);
// if (print_result) {
// std::cout << "print row group" << std::endl;
// for (int i = 0; i < total_dl_matrix_row_num; i++) {
// std::cout << row_group[i] << ' ';
// }
// std::cout << std::endl;
// }
// cudaMemcpy(col_group_gpu, dataset->col_group.data(),
// sizeof(int) * total_dl_matrix_col_num, cudaMemcpyHostToDevice);
// timer.EndDataLoadTime();
// timer.StartCoreTime();
// cudaProfilerStart();
// gpu::mc_solver(dl_matrix_gpu, results_gpu, deleted_cols_gpu,
// deleted_rows_gpu,
// col_group_gpu, row_group_gpu, conflict_count_gpu,
// vertex_num_gpu, total_dl_matrix_row_num_gpu,
// total_dl_matrix_col_num_gpu);
// cudaDeviceSynchronize();
// cudaProfilerStop();
// timer.EndCoreTime();
// std::vector<int> results(total_dl_matrix_row_num);
// cudaMemcpy(results.data(), results_gpu, sizeof(int) *
// total_dl_matrix_row_num,
// cudaMemcpyDeviceToHost);
// cudaMemcpy(deleted_cols.data(), deleted_cols_gpu,
// sizeof(int) * total_dl_matrix_col_num, cudaMemcpyDeviceToHost);
// dataset->final_result.clear();
// for (int i = 0; i < total_row; i++) {
// if (results[i] > 0) {
// // std::cout<<"debug"<<dataset->final_result.empty()<<std::endl;
// if (i + 1 > 3 * dataset->vertex_num) {
// dataset->final_result.push_back(i + 2);
// } else {
// dataset->final_result.push_back(i + 1);
// }
// }
// }
// if (print_result) {
// for (int i = 0; i < total_dl_matrix_row_num; i++) {
// std::cout << results[i] << ' ';
// }
// std::cout << std::endl;
// for (int i = 0; i < total_dl_matrix_row_num; i++) {
// if (results[i] > 0) {
// std::cout << i << ' ';
// }
// }
// std::cout << std::endl;
// for (int i = 0; i < total_dl_matrix_col_num; i++) {
// if (deleted_cols[i] == -1) {
// conflict_count++;
// }
// }
// std::cout << "Conflict Num is " << conflict_count / 3 << std::endl;
// }
// cudaFree(dl_matrix_gpu);
// cudaFree(deleted_cols_gpu);
// cudaFree(col_group_gpu);
// cudaFree(results_gpu);
// cudaFree(conflict_count_gpu);
// cudaFree(deleted_rows_gpu);
// cudaFree(row_group_gpu);
// return timer;
// }
MeasureTimer Invoke_ORIGINAL_GPU_MG(DataSets *datasets, bool print_result) {
MeasureTimer timer;
int total_row = 0, total_col = 0;
int n = datasets->graph_count;
for (int i = 0; i < n; ++i) {
total_row += datasets->total_dl_matrix_row_num[i];
total_col += datasets->total_dl_matrix_col_num[i];
}
int total_matrix = datasets->dl_matrix.size();
std::vector<int> deleted_cols(total_col, 0);
std::vector<int> deleted_rows(total_row, 0);
std::vector<int> conflict_count(n, 0);
bool *bool_dl_matrix = new bool[datasets->bool_dl_matrix.size()];
bool *bool_transpose_dl_matrix =
new bool[datasets->bool_transpose_dl_matrix.size()];
for (int i = 0; i < datasets->bool_dl_matrix.size(); ++i) {
bool_dl_matrix[i] = datasets->bool_dl_matrix[i];
bool_transpose_dl_matrix[i] = datasets->bool_transpose_dl_matrix[i];
}
timer.StartDataLoadTime();
bool *dl_matrix_gpu;
bool *transpose_dl_matrix_gpu;
int *next_col_gpu;
int *next_row_gpu;
int *results_gpu;
int *conflict_edge_gpu;
// cudaMalloc(&dl_matrix_gpu, sizeof(int) * total_matrix);
// cudaMalloc(&transpose_dl_matrix_gpu, sizeof(int) * total_matrix);
cudaMalloc(&dl_matrix_gpu, sizeof(bool) * total_matrix);
cudaMalloc(&transpose_dl_matrix_gpu, sizeof(bool) * total_matrix);
// cudaMalloc(&conflict_edge_gpu, sizeof(int) * 2 * n);
cudaMalloc(&next_col_gpu, sizeof(int) * total_matrix);
cudaMalloc(&next_row_gpu, sizeof(int) * total_matrix);
// cudaMemcpy(dl_matrix_gpu, datasets->dl_matrix.data(),
// sizeof(int) * total_matrix, cudaMemcpyHostToDevice);
// cudaMemcpy(transpose_dl_matrix_gpu, datasets->transpose_dl_matrix.data(),
// sizeof(int) * total_matrix, cudaMemcpyHostToDevice);
cudaMemcpy(dl_matrix_gpu, bool_dl_matrix, sizeof(bool) * total_matrix,
cudaMemcpyHostToDevice);
cudaMemcpy(transpose_dl_matrix_gpu, bool_transpose_dl_matrix,
sizeof(bool) * total_matrix, cudaMemcpyHostToDevice);
cudaMemcpy(next_col_gpu, datasets->next_col.data(),
sizeof(int) * total_matrix, cudaMemcpyHostToDevice);
cudaMemcpy(next_row_gpu, datasets->next_row.data(),
sizeof(int) * total_matrix, cudaMemcpyHostToDevice);
cudaMalloc(&results_gpu, sizeof(int) * total_row);
int *deleted_cols_gpu;
int *deleted_rows_gpu;
int *col_group_gpu;
int *row_group_gpu;
int *conflict_count_gpu;
// cudaMalloc(&deleted_cols_gpu, sizeof(int) * total_col);
// cudaMalloc(&deleted_rows_gpu, sizeof(int) * total_row);
cudaMalloc(&col_group_gpu, sizeof(int) * total_col);
cudaMalloc(&row_group_gpu, sizeof(int) * total_row);
// cudaMalloc(&conflict_count_gpu, sizeof(int) * total_col);
cudaMemcpy(col_group_gpu, datasets->col_group.data(), sizeof(int) * total_col,
cudaMemcpyHostToDevice);
int *vertex_num_gpu;
int *total_dl_matrix_col_num_gpu;
int *total_dl_matrix_row_num_gpu;
cudaMalloc(&vertex_num_gpu, sizeof(int) * n);
cudaMalloc(&total_dl_matrix_col_num_gpu, sizeof(int) * n);
cudaMalloc(&total_dl_matrix_row_num_gpu, sizeof(int) * n);
cudaMemcpy(vertex_num_gpu, datasets->vertex_num.data(), sizeof(int) * n,
cudaMemcpyHostToDevice);
cudaMemcpy(total_dl_matrix_col_num_gpu,
datasets->total_dl_matrix_col_num.data(), sizeof(int) * n,
cudaMemcpyHostToDevice);
cudaMemcpy(total_dl_matrix_row_num_gpu,
datasets->total_dl_matrix_row_num.data(), sizeof(int) * n,
cudaMemcpyHostToDevice);
int *offset_col_gpu;
int *offset_row_gpu;
int *offset_matrix_gpu;
int *max_gpu;
cudaMalloc(&offset_col_gpu, sizeof(int) * n);
cudaMalloc(&offset_row_gpu, sizeof(int) * n);
cudaMalloc(&offset_matrix_gpu, sizeof(int) * n);
// cudaMalloc(&max_gpu, sizeof(int) * n);
cudaMemcpy(offset_col_gpu, datasets->offset_col.data(), sizeof(int) * n,
cudaMemcpyHostToDevice);
cudaMemcpy(offset_row_gpu, datasets->offset_row.data(), sizeof(int) * n,
cudaMemcpyHostToDevice);
cudaMemcpy(offset_matrix_gpu, datasets->offset_matrix.data(), sizeof(int) * n,
cudaMemcpyHostToDevice);
int *search_depth_gpu;
int *selected_row_id_gpu;
int *current_conflict_count_gpu;
int *conflict_node_id_gpu;
int *conflict_col_id_gpu;
int *existance_of_candidate_rows_gpu;
// cudaMalloc(&search_depth_gpu, sizeof(int) * n);
// cudaMalloc(&selected_row_id_gpu, sizeof(int) * n);
// cudaMalloc(&current_conflict_count_gpu, sizeof(int) * n);
// cudaMalloc(&conflict_node_id_gpu, sizeof(int) * n);
// cudaMalloc(&conflict_col_id_gpu, sizeof(int) * n);
// cudaMalloc(&existance_of_candidate_rows_gpu, sizeof(int) * n);
timer.EndDataLoadTime();
int hard_conflict_threshold = THRESHOLD;
int graph_per_block = 1;
int thread_count = 32;
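// 2-D thread blocks: 32 threads (one warp) in x per graph, graph_per_block graphs stacked in y.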
dim3 thread_size(thread_count, graph_per_block);
cudaDeviceSynchronize();
gpu_mg::init_vertex_group<<<n, 32>>>(
row_group_gpu, dl_matrix_gpu, vertex_num_gpu, total_dl_matrix_col_num_gpu,
total_dl_matrix_row_num_gpu, offset_row_gpu, offset_matrix_gpu, n);
cudaDeviceSynchronize();
timer.StartCoreTime();
cudaProfilerStart();
gpu_mg::mc_solver<<<n / graph_per_block + 1, thread_size>>>(
dl_matrix_gpu, transpose_dl_matrix_gpu, next_col_gpu, next_row_gpu,
results_gpu, deleted_cols_gpu, deleted_rows_gpu, col_group_gpu,
row_group_gpu, conflict_count_gpu, vertex_num_gpu,
total_dl_matrix_row_num_gpu, total_dl_matrix_col_num_gpu, offset_col_gpu,
offset_row_gpu, offset_matrix_gpu, search_depth_gpu, selected_row_id_gpu,
current_conflict_count_gpu, conflict_node_id_gpu, conflict_col_id_gpu,
existance_of_candidate_rows_gpu, conflict_edge_gpu, max_gpu, n,
hard_conflict_threshold, graph_per_block);
cudaDeviceSynchronize();
cudaProfilerStop();
timer.EndCoreTime();
std::vector<int> results(total_row, 0);
cudaMemcpy(results.data(), results_gpu, sizeof(int) * total_row,
cudaMemcpyDeviceToHost);
datasets->final_result.clear();
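// Decode selected rows per graph into 1-based ids; rows whose 1-based index exceeds 3*vertex_num get one extra offset.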
for (int k = 0; k < n; k++) {
for (int i = 0; i < datasets->total_dl_matrix_row_num[k]; i++) {
if (results[datasets->offset_row[k] + i] > 0) {
if (i + 1 > 3 * datasets->vertex_num[k]) {
datasets->final_result.push_back(i + 2);
} else {
datasets->final_result.push_back(i + 1);
}
}
}
}
// /*
if (print_result) {
cudaMemcpy(deleted_cols.data(), deleted_cols_gpu, sizeof(int) * total_col,
cudaMemcpyDeviceToHost);
for (int k = 0; k < n; k++) {
// for (int i = 0; i < datasets->total_dl_matrix_row_num[k]; i++) {
// std::cout << results[datasets->offset_row[k] + i] << ' ';
// }
// std::cout << std::endl;
// for (int i = 0; i < datasets->total_dl_matrix_row_num[k]; i++) {
// if (results[datasets->offset_row[k] + i] > 0) {
// std::cout << i << ' ';
// }
// }
// std::cout << std::endl;
for (int i = 0; i < datasets->total_dl_matrix_col_num[k]; i++) {
if (deleted_cols[datasets->offset_col[k] + i] == -1) {
conflict_count[k]++;
}
}
// 3 is the number of colors
if (conflict_count[k] > 0){
std::cout << "Conflict Num is " << conflict_count[k] / 3 << std::endl;
}
}
}
// */
cudaFree(dl_matrix_gpu);
cudaFree(next_col_gpu);
cudaFree(next_row_gpu);
cudaFree(results_gpu);
// cudaFree(deleted_cols_gpu);
// cudaFree(deleted_rows_gpu);
cudaFree(col_group_gpu);
cudaFree(row_group_gpu);
// cudaFree(conflict_count_gpu);
// cudaFree(max_gpu);
cudaFree(vertex_num_gpu);
cudaFree(total_dl_matrix_col_num_gpu);
cudaFree(total_dl_matrix_row_num_gpu);
cudaFree(offset_col_gpu);
cudaFree(offset_row_gpu);
cudaFree(offset_matrix_gpu);
// cudaFree(search_depth_gpu);
// cudaFree(selected_row_id_gpu);
// cudaFree(current_conflict_count_gpu);
// cudaFree(conflict_col_id_gpu);
// cudaFree(conflict_node_id_gpu);
// cudaFree(existance_of_candidate_rows_gpu);
// cudaFree(conflict_edge_gpu);
delete[] bool_dl_matrix;
delete[] bool_transpose_dl_matrix;
cudaDeviceReset();
return timer;
}
MeasureTimer Invoke(const ImplVersion version, bool print_result,
DataSet *dataset) {
MeasureTimer default_timer;
switch (version) {
case ImplVersion::ORIGINAL_CPU:
return Invoke_ORIGINAL_CPU(dataset, print_result);
case ImplVersion::ORIGINAL_GPU:
// return Invoke_ORIGINAL_GPU(dataset, print_result);
default:
std::cout << "Not Impl yet" << std::endl;
return default_timer;
}
}
MeasureTimer Invoke(const ImplVersion version, bool print_result,
DataSets *datasets) {
MeasureTimer default_timer;
switch (version) {
case ImplVersion::ORIGINAL_GPU_MG:
return Invoke_ORIGINAL_GPU_MG(datasets, print_result);
default:
std::cout << "Not Impl yet" << std::endl;
return default_timer;
}
}
|
adam.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/adam.hpp"
namespace lbann {
namespace {
__global__ void adam_kernel(int height,
int width,
DataType correction,
DataType eps,
DataType beta1,
DataType beta2,
DataType * __restrict__ values,
int values_ldim,
const DataType * __restrict__ gradient,
int gradient_ldim,
DataType * __restrict__ moment1,
int moment1_ldim,
DataType * __restrict__ moment2,
int moment2_ldim) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int num_threads = gridDim.x * blockDim.x;
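// Grid-stride loop over all height*width entries; matrices are column-major with explicit leading dimensions.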
for (int pos = tid; pos < height * width; pos += num_threads) {
const auto& i = pos % height;
const auto& j = pos / height;
const auto& g = gradient[i + j * gradient_ldim] + eps;
auto& m1 = moment1[i + j * moment1_ldim];
auto& m2 = moment2[i + j * moment2_ldim];
auto& x = values[i + j * values_ldim];
m1 = beta1 * m1 + (DataType(1) - beta1) * g;
m2 = beta2 * m2 + (DataType(1) - beta2) * g * g;
x -= correction * m1 / (sqrt(m2) + eps);
}
}
}
void adam::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) {
// Precompute the bias correction and learning rate.
m_current_beta1 *= m_beta1;
m_current_beta2 *= m_beta2;
const DataType correction = m_learning_rate *
(std::sqrt(DataType(1) - m_current_beta2)
/ (DataType(1) - m_current_beta1));
// Get matrix dimensions
const int local_height = values.LocalHeight();
const int local_width = values.LocalWidth();
const int size = local_height * local_width;
if (size <= 0) { return; }
// Launch CUDA kernels
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (size + block_size - 1) / block_size;
CHECK_CUDA(hipSetDevice(this->m_cudnn->get_gpu()));
hipStream_t stream = this->m_cudnn->get_stream();
hipLaunchKernelGGL(( adam_kernel), dim3(grid_dims), dim3(block_dims), 0, stream,
local_height, local_width, correction, m_eps, m_beta1, m_beta2,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_moment1->Buffer(), m_moment1->LDim(),
m_moment2->Buffer(), m_moment2->LDim());
}
} // namespace lbann
| adam.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#include "lbann/optimizers/adam.hpp"
namespace lbann {
namespace {
__global__ void adam_kernel(int height,
int width,
DataType correction,
DataType eps,
DataType beta1,
DataType beta2,
DataType * __restrict__ values,
int values_ldim,
const DataType * __restrict__ gradient,
int gradient_ldim,
DataType * __restrict__ moment1,
int moment1_ldim,
DataType * __restrict__ moment2,
int moment2_ldim) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int num_threads = gridDim.x * blockDim.x;
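// Grid-stride loop over all height*width entries; matrices are column-major with explicit leading dimensions.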
for (int pos = tid; pos < height * width; pos += num_threads) {
const auto& i = pos % height;
const auto& j = pos / height;
const auto& g = gradient[i + j * gradient_ldim] + eps;
auto& m1 = moment1[i + j * moment1_ldim];
auto& m2 = moment2[i + j * moment2_ldim];
auto& x = values[i + j * values_ldim];
m1 = beta1 * m1 + (DataType(1) - beta1) * g;
m2 = beta2 * m2 + (DataType(1) - beta2) * g * g;
x -= correction * m1 / (sqrt(m2) + eps);
}
}
}
void adam::step_compute_gpu(AbsDistMat& values, const AbsDistMat& gradient) {
// Precompute the bias correction and learning rate.
m_current_beta1 *= m_beta1;
m_current_beta2 *= m_beta2;
const DataType correction = m_learning_rate *
(std::sqrt(DataType(1) - m_current_beta2)
/ (DataType(1) - m_current_beta1));
// Get matrix dimensions
const int local_height = values.LocalHeight();
const int local_width = values.LocalWidth();
const int size = local_height * local_width;
if (size <= 0) { return; }
// Launch CUDA kernels
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (size + block_size - 1) / block_size;
CHECK_CUDA(cudaSetDevice(this->m_cudnn->get_gpu()));
cudaStream_t stream = this->m_cudnn->get_stream();
adam_kernel<<<grid_dims, block_dims, 0, stream>>>
(local_height, local_width, correction, m_eps, m_beta1, m_beta2,
values.Buffer(), values.LDim(),
gradient.LockedBuffer(), gradient.LDim(),
m_moment1->Buffer(), m_moment1->LDim(),
m_moment2->Buffer(), m_moment2->LDim());
}
} // namespace lbann
|
0848e6dd4235c8290e25992a5ebf20e0adb53ebf.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <c10/util/Exception.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/native/sparse/hip/SparseHIPBlas.cuh>
#include <TH/THGeneral.h>
#include <hipsparse.h>
#if !defined(CUSPARSE_VERSION) || (CUSPARSE_VERSION < 10200)
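// Provide a local error-string helper when the toolkit (cuSPARSE < 10.2) does not ship one.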
const char* hipsparseGetErrorString(hipsparseStatus_t status) {
switch(status)
{
case HIPSPARSE_STATUS_SUCCESS:
return "success";
case HIPSPARSE_STATUS_NOT_INITIALIZED:
return "library not initialized";
case HIPSPARSE_STATUS_ALLOC_FAILED:
return "resource allocation failed";
case HIPSPARSE_STATUS_INVALID_VALUE:
return "an invalid numeric value was used as an argument";
case HIPSPARSE_STATUS_ARCH_MISMATCH:
return "an absent device architectural feature is required";
case HIPSPARSE_STATUS_MAPPING_ERROR:
return "an access to GPU memory space failed";
case HIPSPARSE_STATUS_EXECUTION_FAILED:
return "the GPU program failed to execute";
case HIPSPARSE_STATUS_INTERNAL_ERROR:
return "an internal operation failed";
case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "the matrix type is not supported by this function";
case HIPSPARSE_STATUS_ZERO_PIVOT:
return "an entry of the matrix is either structural zero or numerical zero (singular block)";
default:
return "unknown error";
}
}
#endif
namespace at { namespace native { namespace sparse { namespace cuda {
void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) {
TORCH_CHECK((m <= INT_MAX) && (nnz <= INT_MAX),
"hipsparseXcoo2csr only supports m, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
int i_m = (int)m;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcoo2csr(handle, coorowind, i_nnz, i_m, csrrowptr, HIPSPARSE_INDEX_BASE_ZERO));
}
hipsparseOperation_t convertTransToCusparseOperation(char trans) {
if (trans == 't') return HIPSPARSE_OPERATION_TRANSPOSE;
else if (trans == 'n') return HIPSPARSE_OPERATION_NON_TRANSPOSE;
else if (trans == 'c') return HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
else {
AT_ERROR("trans must be one of: t, n, c");
}
}
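// Adjust ldb/ldc when b or c degenerates to a vector so the csrmm2 wrappers below accept them.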
void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc)
{
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
/* Level 3 */
void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"hipsparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(hipsparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyMatDescr(desc));
}
void Dcsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
hipsparseOperation_t opa = convertTransToCusparseOperation(transa);
hipsparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"hipsparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(hipsparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyMatDescr(desc));
// TODO: Proper fix is to create real descriptor classes
}
/* format conversion */
void CreateIdentityPermutation(int64_t nnz, int *P) {
TORCH_CHECK((nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseCreateIdentityPermutation(handle, i_nnz, P);
}
void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes));
}
void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseMatDescr_t desc;
hipsparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(hipsparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer));
TORCH_CUDASPARSE_CHECK(hipsparseDestroyMatDescr(desc));
}
void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes));
}
void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"XcoosortByRow only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(hipsparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer));
}
}}}} // namespace at::native::sparse::cuda
| 0848e6dd4235c8290e25992a5ebf20e0adb53ebf.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Exception.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/native/sparse/cuda/SparseCUDABlas.cuh>
#include <TH/THGeneral.h>
#include <cusparse.h>
#if !defined(CUSPARSE_VERSION) || (CUSPARSE_VERSION < 10200)
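// Provide a local error-string helper when the toolkit (cuSPARSE < 10.2) does not ship one.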
const char* cusparseGetErrorString(cusparseStatus_t status) {
switch(status)
{
case CUSPARSE_STATUS_SUCCESS:
return "success";
case CUSPARSE_STATUS_NOT_INITIALIZED:
return "library not initialized";
case CUSPARSE_STATUS_ALLOC_FAILED:
return "resource allocation failed";
case CUSPARSE_STATUS_INVALID_VALUE:
return "an invalid numeric value was used as an argument";
case CUSPARSE_STATUS_ARCH_MISMATCH:
return "an absent device architectural feature is required";
case CUSPARSE_STATUS_MAPPING_ERROR:
return "an access to GPU memory space failed";
case CUSPARSE_STATUS_EXECUTION_FAILED:
return "the GPU program failed to execute";
case CUSPARSE_STATUS_INTERNAL_ERROR:
return "an internal operation failed";
case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED:
return "the matrix type is not supported by this function";
case CUSPARSE_STATUS_ZERO_PIVOT:
return "an entry of the matrix is either structural zero or numerical zero (singular block)";
default:
return "unknown error";
}
}
#endif
namespace at { namespace native { namespace sparse { namespace cuda {
void Xcoo2csr(const int *coorowind, int64_t nnz, int64_t m, int *csrrowptr) {
TORCH_CHECK((m <= INT_MAX) && (nnz <= INT_MAX),
"cusparseXcoo2csr only supports m, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
int i_m = (int)m;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcoo2csr(handle, coorowind, i_nnz, i_m, csrrowptr, CUSPARSE_INDEX_BASE_ZERO));
}
cusparseOperation_t convertTransToCusparseOperation(char trans) {
if (trans == 't') return CUSPARSE_OPERATION_TRANSPOSE;
else if (trans == 'n') return CUSPARSE_OPERATION_NON_TRANSPOSE;
else if (trans == 'c') return CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE;
else {
AT_ERROR("trans must be one of: t, n, c");
}
}
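// Adjust ldb/ldc when b or c degenerates to a vector so the csrmm2 wrappers below accept them.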
void adjustLd(char transb, int64_t m, int64_t n, int64_t k, int64_t *ldb, int64_t *ldc)
{
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
/* Level 3 */
void Scsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, float alpha, float *csrvala, int *csrrowptra, int *csrcolinda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"cusparseScsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(cusparseScsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(cusparseDestroyMatDescr(desc));
}
void Dcsrmm2(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t nnz, double alpha, double *csrvala, int *csrrowptra, int *csrcolinda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
adjustLd(transb, m, n, k, &ldb, &ldc);
cusparseOperation_t opa = convertTransToCusparseOperation(transa);
cusparseOperation_t opb = convertTransToCusparseOperation(transb);
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (nnz <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX),
"cusparseDcsrmm2 only supports m, n, k, nnz, ldb, ldc with the bound [val] <= ", INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_nnz = (int)nnz;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(cusparseDcsrmm2(handle, opa, opb, i_m, i_n, i_k, i_nnz, &alpha, desc, csrvala, csrrowptra, csrcolinda, b, i_ldb, &beta, c, i_ldc));
TORCH_CUDASPARSE_CHECK(cusparseDestroyMatDescr(desc));
// TODO: Proper fix is to create real descriptor classes
}
/* format conversion */
void CreateIdentityPermutation(int64_t nnz, int *P) {
TORCH_CHECK((nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseCreateIdentityPermutation(handle, i_nnz, P);
}
void Xcsrsort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort_bufferSizeExt only supports m, n, nnz with the bound [val] <=",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcsrsort_bufferSizeExt(handle, i_m, i_n, i_nnz, csrRowPtr, csrColInd, pBufferSizeInBytes));
}
void Xcsrsort(int64_t m, int64_t n, int64_t nnz, const int *csrRowPtr, int *csrColInd, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcsrsort only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
cusparseMatDescr_t desc;
cusparseCreateMatDescr(&desc);
TORCH_CUDASPARSE_CHECK(cusparseXcsrsort(handle, i_m, i_n, i_nnz, desc, csrRowPtr, csrColInd, P, pBuffer));
TORCH_CUDASPARSE_CHECK(cusparseDestroyMatDescr(desc));
}
void Xcoosort_bufferSizeExt(int64_t m, int64_t n, int64_t nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"Xcoosort_bufferSizeExt only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcoosort_bufferSizeExt(handle, i_m, i_n, i_nnz, cooRows, cooCols, pBufferSizeInBytes));
}
void XcoosortByRow(int64_t m, int64_t n, int64_t nnz, int *cooRows, int *cooCols, int *P, void *pBuffer)
{
TORCH_CHECK((m <= INT_MAX) && (n <= INT_MAX) && (nnz <= INT_MAX),
"XcoosortByRow only supports m, n, nnz with the bound [val] <= ",
INT_MAX);
int i_m = (int)m;
int i_n = (int)n;
int i_nnz = (int)nnz;
auto handle = at::cuda::getCurrentCUDASparseHandle();
TORCH_CUDASPARSE_CHECK(cusparseXcoosortByRow(handle, i_m, i_n, i_nnz, cooRows, cooCols, P, pBuffer));
}
}}}} // namespace at::native::sparse::cuda
|
3f9af886614eac5cf0a87aedddf555c292e1b62c.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/quantile.h"
#include "../helpers.h"
#include "gtest/gtest.h"
TEST(SumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
auto sum = dh::SumReduction(data.data().get(), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
void TestAtomicSizeT() {
size_t constexpr kThreads = 235;
dh::device_vector<size_t> out(1, 0);
auto d_out = dh::ToSpan(out);
dh::LaunchN(0, kThreads, [=]__device__(size_t idx){
atomicAdd(&d_out[0], static_cast<size_t>(1));
});
ASSERT_EQ(out[0], kThreads);
}
TEST(AtomicAdd, SizeT) {
TestAtomicSizeT();
}
void TestSegmentID() {
std::vector<size_t> segments{0, 1, 3};
thrust::device_vector<size_t> d_segments(segments);
auto s_segments = dh::ToSpan(d_segments);
dh::LaunchN(0, 1, [=]__device__(size_t idx) {
auto id = dh::SegmentId(s_segments, 0);
SPAN_CHECK(id == 0);
id = dh::SegmentId(s_segments, 1);
SPAN_CHECK(id == 1);
id = dh::SegmentId(s_segments, 2);
SPAN_CHECK(id == 1);
});
}
TEST(SegmentID, Basic) {
TestSegmentID();
}
TEST(SegmentedUnique, Basic) {
std::vector<float> values{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.62448811531066895f, 0.4f};
std::vector<size_t> segments{0, 3, 6};
thrust::device_vector<float> d_values(values);
thrust::device_vector<xgboost::bst_feature_t> d_segments{segments};
thrust::device_vector<xgboost::bst_feature_t> d_segs_out(d_segments.size());
thrust::device_vector<float> d_vals_out(d_values.size());
size_t n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(),
d_values.data().get(), d_values.data().get() + d_values.size(),
d_segs_out.data().get(), d_vals_out.data().get(),
thrust::equal_to<float>{});
CHECK_EQ(n_uniques, 5);
std::vector<float> values_sol{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.4f};
for (auto i = 0 ; i < values_sol.size(); i ++) {
ASSERT_EQ(d_vals_out[i], values_sol[i]);
}
std::vector<xgboost::bst_feature_t> segments_sol{0, 3, 5};
for (size_t i = 0; i < d_segments.size(); ++i) {
ASSERT_EQ(segments_sol[i], d_segs_out[i]);
}
d_segments[1] = 4;
d_segments[2] = 6;
n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(),
d_values.data().get(), d_values.data().get() + d_values.size(),
d_segs_out.data().get(), d_vals_out.data().get(),
thrust::equal_to<float>{});
ASSERT_EQ(n_uniques, values.size());
for (auto i = 0 ; i < values.size(); i ++) {
ASSERT_EQ(d_vals_out[i], values[i]);
}
}
namespace {
using SketchEntry = xgboost::common::WQSummary<float, float>::Entry;
struct SketchUnique {
bool __device__ operator()(SketchEntry const& a, SketchEntry const& b) const {
return a.value - b.value == 0;
}
};
struct IsSorted {
bool __device__ operator()(SketchEntry const& a, SketchEntry const& b) const {
return a.value < b.value;
}
};
} // namespace
namespace xgboost {
namespace common {
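// Runs SegmentedUnique over one segment and checks that exactly n_duplicated entries are removed while the survivors stay sorted.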
void TestSegmentedUniqueRegression(std::vector<SketchEntry> values, size_t n_duplicated) {
std::vector<bst_feature_t> segments{0, static_cast<bst_feature_t>(values.size())};
thrust::device_vector<SketchEntry> d_values(values);
thrust::device_vector<bst_feature_t> d_segments(segments);
thrust::device_vector<bst_feature_t> d_segments_out(segments.size());
size_t n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(), d_values.data().get(),
d_values.data().get() + d_values.size(), d_segments_out.data().get(), d_values.data().get(),
SketchUnique{});
ASSERT_EQ(n_uniques, values.size() - n_duplicated);
ASSERT_TRUE(thrust::is_sorted(thrust::device, d_values.begin(),
d_values.begin() + n_uniques, IsSorted{}));
ASSERT_EQ(segments.at(0), d_segments_out[0]);
ASSERT_EQ(segments.at(1), d_segments_out[1] + n_duplicated);
}
TEST(DeviceHelpers, Reduce) {
size_t kSize = std::numeric_limits<uint32_t>::max();
auto it = thrust::make_counting_iterator(0ul);
dh::XGBCachingDeviceAllocator<char> alloc;
auto batched = dh::Reduce(thrust::hip::par(alloc), it, it + kSize, 0ul, thrust::maximum<size_t>{});
CHECK_EQ(batched, kSize - 1);
}
TEST(SegmentedUnique, Regression) {
{
std::vector<SketchEntry> values{{3149, 3150, 1, 0.62392902374267578},
{3151, 3152, 1, 0.62418866157531738},
{3152, 3153, 1, 0.62419462203979492},
{3153, 3154, 1, 0.62431186437606812},
{3154, 3155, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3157, 3158, 1, 0.62552797794342041},
{3158, 3159, 1, 0.6256556510925293},
{3159, 3160, 1, 0.62571090459823608},
{3160, 3161, 1, 0.62577134370803833}};
TestSegmentedUniqueRegression(values, 3);
}
{
std::vector<SketchEntry> values{{3149, 3150, 1, 0.62392902374267578},
{3151, 3152, 1, 0.62418866157531738},
{3152, 3153, 1, 0.62419462203979492},
{3153, 3154, 1, 0.62431186437606812},
{3154, 3155, 1, 0.6244881153106689453125},
{3157, 3158, 1, 0.62552797794342041},
{3158, 3159, 1, 0.6256556510925293},
{3159, 3160, 1, 0.62571090459823608},
{3160, 3161, 1, 0.62577134370803833}};
TestSegmentedUniqueRegression(values, 0);
}
{
std::vector<SketchEntry> values;
TestSegmentedUniqueRegression(values, 0);
}
}
TEST(Allocator, OOM) {
auto size = dh::AvailableMemory(0) * 4;
ASSERT_THROW({dh::caching_device_vector<char> vec(size);}, dmlc::Error);
ASSERT_THROW({dh::device_vector<char> vec(size);}, dmlc::Error);
// Clear last error so we don't fail subsequent tests
hipGetLastError();
}
TEST(DeviceHelpers, ArgSort) {
dh::device_vector<float> values(20);
dh::Iota(dh::ToSpan(values)); // ascending
dh::device_vector<size_t> sorted_idx(20);
dh::ArgSort<false>(dh::ToSpan(values), dh::ToSpan(sorted_idx)); // sort to descending
ASSERT_TRUE(thrust::is_sorted(thrust::device, sorted_idx.begin(),
sorted_idx.end(), thrust::greater<size_t>{}));
dh::Iota(dh::ToSpan(values));
dh::device_vector<size_t> groups(3);
groups[0] = 0;
groups[1] = 10;
groups[2] = 20;
dh::SegmentedArgSort<false>(dh::ToSpan(values), dh::ToSpan(groups),
dh::ToSpan(sorted_idx));
ASSERT_FALSE(thrust::is_sorted(thrust::device, sorted_idx.begin(),
sorted_idx.end(), thrust::greater<size_t>{}));
ASSERT_TRUE(thrust::is_sorted(sorted_idx.begin(), sorted_idx.begin() + 10,
thrust::greater<size_t>{}));
ASSERT_TRUE(thrust::is_sorted(sorted_idx.begin() + 10, sorted_idx.end(),
thrust::greater<size_t>{}));
}
} // namespace common
} // namespace xgboost
| 3f9af886614eac5cf0a87aedddf555c292e1b62c.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/quantile.h"
#include "../helpers.h"
#include "gtest/gtest.h"
TEST(SumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
auto sum = dh::SumReduction(data.data().get(), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
void TestAtomicSizeT() {
size_t constexpr kThreads = 235;
dh::device_vector<size_t> out(1, 0);
auto d_out = dh::ToSpan(out);
dh::LaunchN(0, kThreads, [=]__device__(size_t idx){
atomicAdd(&d_out[0], static_cast<size_t>(1));
});
ASSERT_EQ(out[0], kThreads);
}
TEST(AtomicAdd, SizeT) {
TestAtomicSizeT();
}
void TestSegmentID() {
std::vector<size_t> segments{0, 1, 3};
thrust::device_vector<size_t> d_segments(segments);
auto s_segments = dh::ToSpan(d_segments);
dh::LaunchN(0, 1, [=]__device__(size_t idx) {
auto id = dh::SegmentId(s_segments, 0);
SPAN_CHECK(id == 0);
id = dh::SegmentId(s_segments, 1);
SPAN_CHECK(id == 1);
id = dh::SegmentId(s_segments, 2);
SPAN_CHECK(id == 1);
});
}
TEST(SegmentID, Basic) {
TestSegmentID();
}
TEST(SegmentedUnique, Basic) {
std::vector<float> values{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.62448811531066895f, 0.4f};
std::vector<size_t> segments{0, 3, 6};
thrust::device_vector<float> d_values(values);
thrust::device_vector<xgboost::bst_feature_t> d_segments{segments};
thrust::device_vector<xgboost::bst_feature_t> d_segs_out(d_segments.size());
thrust::device_vector<float> d_vals_out(d_values.size());
size_t n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(),
d_values.data().get(), d_values.data().get() + d_values.size(),
d_segs_out.data().get(), d_vals_out.data().get(),
thrust::equal_to<float>{});
CHECK_EQ(n_uniques, 5);
std::vector<float> values_sol{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.4f};
for (auto i = 0 ; i < values_sol.size(); i ++) {
ASSERT_EQ(d_vals_out[i], values_sol[i]);
}
std::vector<xgboost::bst_feature_t> segments_sol{0, 3, 5};
for (size_t i = 0; i < d_segments.size(); ++i) {
ASSERT_EQ(segments_sol[i], d_segs_out[i]);
}
d_segments[1] = 4;
d_segments[2] = 6;
n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(),
d_values.data().get(), d_values.data().get() + d_values.size(),
d_segs_out.data().get(), d_vals_out.data().get(),
thrust::equal_to<float>{});
ASSERT_EQ(n_uniques, values.size());
for (auto i = 0 ; i < values.size(); i ++) {
ASSERT_EQ(d_vals_out[i], values[i]);
}
}
namespace {
using SketchEntry = xgboost::common::WQSummary<float, float>::Entry;
struct SketchUnique {
bool __device__ operator()(SketchEntry const& a, SketchEntry const& b) const {
return a.value - b.value == 0;
}
};
struct IsSorted {
bool __device__ operator()(SketchEntry const& a, SketchEntry const& b) const {
return a.value < b.value;
}
};
} // namespace
namespace xgboost {
namespace common {
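// Runs SegmentedUnique over one segment and checks that exactly n_duplicated entries are removed while the survivors stay sorted.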
void TestSegmentedUniqueRegression(std::vector<SketchEntry> values, size_t n_duplicated) {
std::vector<bst_feature_t> segments{0, static_cast<bst_feature_t>(values.size())};
thrust::device_vector<SketchEntry> d_values(values);
thrust::device_vector<bst_feature_t> d_segments(segments);
thrust::device_vector<bst_feature_t> d_segments_out(segments.size());
size_t n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(), d_values.data().get(),
d_values.data().get() + d_values.size(), d_segments_out.data().get(), d_values.data().get(),
SketchUnique{});
ASSERT_EQ(n_uniques, values.size() - n_duplicated);
ASSERT_TRUE(thrust::is_sorted(thrust::device, d_values.begin(),
d_values.begin() + n_uniques, IsSorted{}));
ASSERT_EQ(segments.at(0), d_segments_out[0]);
ASSERT_EQ(segments.at(1), d_segments_out[1] + n_duplicated);
}
TEST(DeviceHelpers, Reduce) {
size_t kSize = std::numeric_limits<uint32_t>::max();
auto it = thrust::make_counting_iterator(0ul);
dh::XGBCachingDeviceAllocator<char> alloc;
auto batched = dh::Reduce(thrust::cuda::par(alloc), it, it + kSize, 0ul, thrust::maximum<size_t>{});
CHECK_EQ(batched, kSize - 1);
}
TEST(SegmentedUnique, Regression) {
{
std::vector<SketchEntry> values{{3149, 3150, 1, 0.62392902374267578},
{3151, 3152, 1, 0.62418866157531738},
{3152, 3153, 1, 0.62419462203979492},
{3153, 3154, 1, 0.62431186437606812},
{3154, 3155, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3157, 3158, 1, 0.62552797794342041},
{3158, 3159, 1, 0.6256556510925293},
{3159, 3160, 1, 0.62571090459823608},
{3160, 3161, 1, 0.62577134370803833}};
TestSegmentedUniqueRegression(values, 3);
}
{
std::vector<SketchEntry> values{{3149, 3150, 1, 0.62392902374267578},
{3151, 3152, 1, 0.62418866157531738},
{3152, 3153, 1, 0.62419462203979492},
{3153, 3154, 1, 0.62431186437606812},
{3154, 3155, 1, 0.6244881153106689453125},
{3157, 3158, 1, 0.62552797794342041},
{3158, 3159, 1, 0.6256556510925293},
{3159, 3160, 1, 0.62571090459823608},
{3160, 3161, 1, 0.62577134370803833}};
TestSegmentedUniqueRegression(values, 0);
}
{
std::vector<SketchEntry> values;
TestSegmentedUniqueRegression(values, 0);
}
}
TEST(Allocator, OOM) {
auto size = dh::AvailableMemory(0) * 4;
ASSERT_THROW({dh::caching_device_vector<char> vec(size);}, dmlc::Error);
ASSERT_THROW({dh::device_vector<char> vec(size);}, dmlc::Error);
// Clear last error so we don't fail subsequent tests
cudaGetLastError();
}
TEST(DeviceHelpers, ArgSort) {
dh::device_vector<float> values(20);
dh::Iota(dh::ToSpan(values)); // ascending
dh::device_vector<size_t> sorted_idx(20);
dh::ArgSort<false>(dh::ToSpan(values), dh::ToSpan(sorted_idx)); // sort to descending
ASSERT_TRUE(thrust::is_sorted(thrust::device, sorted_idx.begin(),
sorted_idx.end(), thrust::greater<size_t>{}));
dh::Iota(dh::ToSpan(values));
dh::device_vector<size_t> groups(3);
groups[0] = 0;
groups[1] = 10;
groups[2] = 20;
dh::SegmentedArgSort<false>(dh::ToSpan(values), dh::ToSpan(groups),
dh::ToSpan(sorted_idx));
ASSERT_FALSE(thrust::is_sorted(thrust::device, sorted_idx.begin(),
sorted_idx.end(), thrust::greater<size_t>{}));
ASSERT_TRUE(thrust::is_sorted(sorted_idx.begin(), sorted_idx.begin() + 10,
thrust::greater<size_t>{}));
ASSERT_TRUE(thrust::is_sorted(sorted_idx.begin() + 10, sorted_idx.end(),
thrust::greater<size_t>{}));
}
} // namespace common
} // namespace xgboost
|
a2c3e80e516ef5bb769d409c93539eb9a37c9751.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void gatherLocations_gpu( const float *values, const float *zmin, float *dest) {
dest[0] = values[0] + (values[3] - values[0] + *zmin);
dest[1] = values[0];
dest[2] = values[1]/values[0];
dest[3] = values[2]/values[0];
dest[4] = values[3];
}
// CUDA kernel function
__global__ void op_cuda_gatherLocations(
const float *__restrict ind_arg0,
const int *__restrict opDat0Map,
const float *arg1,
float *arg2,
int start,
int end,
int set_size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid + start < end) {
int n = tid + start;
//initialise local variables
int map0idx;
map0idx = opDat0Map[n + set_size * 0];
//user-supplied kernel call
gatherLocations_gpu(ind_arg0+map0idx*4,
arg1,
arg2+n*5);
}
}
//host stub function
void op_par_loop_gatherLocations(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
float*arg1h = (float *)arg1.data;
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(20);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[20].name = name;
OP_kernels[20].count += 1;
int ninds = 1;
int inds[3] = {0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: gatherLocations\n");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg1.data = OP_consts_h + consts_bytes;
arg1.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg1.data)[d] = arg1h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_20
int nthread = OP_BLOCK_SIZE_20;
#else
int nthread = OP_block_size;
#endif
for ( int round=0; round<2; round++ ){
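// Round 0 processes the core (halo-independent) elements; round 1 waits for the MPI halo exchange and processes the remainder.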
if (round==1) {
op_mpi_wait_all_grouped(nargs, args, 2);
}
int start = round==0 ? 0 : set->core_size;
int end = round==0 ? set->core_size : set->size + set->exec_size;
if (end-start>0) {
int nblocks = (end-start-1)/nthread+1;
hipLaunchKernelGGL(( op_cuda_gatherLocations), dim3(nblocks),dim3(nthread), 0, 0,
(float *)arg0.data_d,
arg0.map_data_d,
(float*)arg1.data_d,
(float*)arg2.data_d,
start,end,set->size+set->exec_size);
}
}
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[20].time += wall_t2 - wall_t1;
}
| a2c3e80e516ef5bb769d409c93539eb9a37c9751.cu | //
// auto-generated by op2.py
//
//user function
__device__ void gatherLocations_gpu( const float *values, const float *zmin, float *dest) {
dest[0] = values[0] + (values[3] - values[0] + *zmin);
dest[1] = values[0];
dest[2] = values[1]/values[0];
dest[3] = values[2]/values[0];
dest[4] = values[3];
}
// CUDA kernel function
__global__ void op_cuda_gatherLocations(
const float *__restrict ind_arg0,
const int *__restrict opDat0Map,
const float *arg1,
float *arg2,
int start,
int end,
int set_size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid + start < end) {
int n = tid + start;
//initialise local variables
int map0idx;
map0idx = opDat0Map[n + set_size * 0];
//user-supplied kernel call
gatherLocations_gpu(ind_arg0+map0idx*4,
arg1,
arg2+n*5);
}
}
//host stub function
void op_par_loop_gatherLocations(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
float*arg1h = (float *)arg1.data;
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(20);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[20].name = name;
OP_kernels[20].count += 1;
int ninds = 1;
int inds[3] = {0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: gatherLocations\n");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg1.data = OP_consts_h + consts_bytes;
arg1.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg1.data)[d] = arg1h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_20
int nthread = OP_BLOCK_SIZE_20;
#else
int nthread = OP_block_size;
#endif
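    // two rounds: round 0 covers the core elements that need no halo data;
    // round 1 waits for the MPI halo exchange, then covers the remaining (boundary + exec) elements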
for ( int round=0; round<2; round++ ){
if (round==1) {
op_mpi_wait_all_grouped(nargs, args, 2);
}
int start = round==0 ? 0 : set->core_size;
int end = round==0 ? set->core_size : set->size + set->exec_size;
if (end-start>0) {
int nblocks = (end-start-1)/nthread+1;
op_cuda_gatherLocations<<<nblocks,nthread>>>(
(float *)arg0.data_d,
arg0.map_data_d,
(float*)arg1.data_d,
(float*)arg2.data_d,
start,end,set->size+set->exec_size);
}
}
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[20].time += wall_t2 - wall_t1;
}
|
e3b27801d5cfbf41e0d196c91f49d242b2ab64bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hist.cuh"
#include "hist_2_one_byte_base.cuh"
#include "tuning_policy_enums.cuh"
#include "compute_hist_loop_one_stat.cuh"
#include <hip/hip_cooperative_groups.h>
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int Bits,
int BlockSize>
struct TPointHistOneByte {
const int InnerHistBitsCount = Bits - 5;
float* Histogram;
static constexpr int GetHistSize() {
return BlockSize * 32;
}
static constexpr int AddPointsBatchSize() {
return TLoadSize<LoadSize()>::Size();
}
static constexpr int Unroll(ECIndexLoadType) {
#if __CUDA_ARCH__ < 700
const int NN = 2;
#else
const int NN = 4;
#endif
return NN;
}
static constexpr int GetBlockSize() {
return BlockSize;
}
static constexpr ELoadSize LoadSize() {
#if __CUDA_ARCH__ < 500
return ELoadSize::OneElement;
#else
return ELoadSize::FourElements;
// return ELoadSize::TwoElements;
#endif
}
static constexpr int BlockLoadSize(ECIndexLoadType indexLoadType) {
return TLoadSize<LoadSize()>::Size() * BlockSize * Unroll(indexLoadType);
}
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 8 >> InnerHistBitsCount;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistOneByte(float* hist) {
static_assert(Bits >= 5, "Error: this hist is for 5-8 bits");
const int histSize = 32 * BlockSize;
#pragma unroll 8
for (int i = threadIdx.x; i < histSize; i += BlockSize) {
hist[i] = 0;
}
Histogram = hist + SliceOffset();
__syncthreads();
}
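        // Add one sample: each thread walks the 4 features packed into ci; the (threadIdx.x + i) & 3
        // rotation plus the pass loop over the inner-histogram index serialize the shared-memory
        // updates so that warp lanes do not collide on the same histogram cell.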
__forceinline__ __device__ void AddPoint(ui32 ci, const float t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = (threadIdx.x + i) & 3;
int bin = (ci >> (24 - 8 * f)) & 255;
// int bin = bfe(ci, 24 - 8 * f, 8);
const float statToAdd = (bin >> Bits) == 0 ? t : 0;
const int mask = (1 << InnerHistBitsCount) - 1;
const int higherBin = (bin >> 5) & mask;
int offset = 4 * higherBin + f + ((bin & 31) << 5);
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
if (pass == higherBin) {
Histogram[offset] += statToAdd;
}
}
} else {
syncTile.sync();
Histogram[offset] += statToAdd;
}
}
}
template <int N>
__forceinline__ __device__ void AddPointsImpl(const ui32* ci, const float* t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = (threadIdx.x + i) & 3;
int bins[N];
float stats[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins[k] = (ci[k] >> (24 - 8 * f)) & 255;
// bins[k] = bfe(ci[k], 24 - 8 * f, 8);
stats[k] = (bins[k] >> Bits) == 0 ? t[k] : 0.0f;
}
int offsets[N];
int higherBin[N];
const int mask = (1 << InnerHistBitsCount) - 1;
#pragma unroll
for (int k = 0; k < N; ++k) {
higherBin[k] = (bins[k] >> 5) & mask;
offsets[k] = 4 * higherBin[k] + f + ((bins[k] & 31) << 5);
}
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
if (pass == higherBin[j]) {
Histogram[offsets[j]] += stats[j];
}
}
}
} else {
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
Histogram[offsets[j]] += stats[j];
}
}
}
}
template <int N>
__forceinline__ __device__ void AddPoints(const ui32* ci, const float* t) {
const int NN = AddPointsBatchSize();
static_assert(N % NN == 0, "Error: incorrect stripe size");
#pragma unroll
for (int k = 0; k < N; k += NN) {
AddPointsImpl<NN>(ci + k, t + k);
}
}
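        // Reduce: first collapse the per-warp 1024-float slices into a single 1024-entry histogram
        // (stored just past the first warp slice), then fold the replicated sub-histograms so each
        // (feature, bin) pair is left with one value at the start of the shared buffer.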
__forceinline__ __device__ void Reduce() {
Histogram -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
//12 iterations
#pragma unroll 12
for (int i = start; i < 32 * BlockSize; i += warpHistSize) {
sum += Histogram[i];
}
Histogram[warpHistSize + start] = sum;
}
}
__syncthreads();
        //now only a single 1024-entry histogram remains
const int warpHistBlockCount = 8 >> InnerHistBitsCount;
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
float sum[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] = 0.0f;
}
if (fold < histSize) {
const int warpHistSize = 1024;
const int lowerBitsOffset = (fold & 31) << 5;
const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1);
const int blockSize = 4 * (1 << InnerHistBitsCount);
const volatile float* src = Histogram + warpHistSize + lowerBitsOffset + 4 * higherBin;
#pragma unroll
for (int block = 0; block < warpHistBlockCount; ++block) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] += src[i + block * blockSize];
}
}
}
__syncthreads();
if (fold < histSize) {
for (int i = 0; i < 4; ++i) {
Histogram[histSize * i + fold] = sum[i];
}
}
__syncthreads();
}
__forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount,
const TFeatureInBlock* features,
int fCount,
int leafId, int leafCount,
float* binSums) {
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
#pragma unroll 4
for (int fid = 0; fid < fCount; ++fid) {
TFeatureInBlock group = features[fid];
const int deviceOffset = group.GroupOffset * statCount * leafCount;
const int entriesPerLeaf = statCount * group.GroupSize;
float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize + group.FoldOffsetInGroup;
if (fold < features[fid].Folds) {
const float val = Histogram[fid * histSize + fold];
if (abs(val) > 1e-20f) {
if (blockCount > 1) {
atomicAdd(dst + fold, val);
} else {
dst[fold] = val;
}
}
}
}
}
};
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
hipLaunchKernelGGL(( ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
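    // HIST2_PASS: an odd statistic count is split into one single-statistic pass (PASS with NumStats=1)
    // plus the two-statistics-per-pass kernel for the remaining even count; the boolean template
    // parameter presumably tells the paired kernel to account for the leftover statistic.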
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
hipLaunchKernelGGL(( ComputeSplitPropertiesGatherImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
/*
* Single part
*/
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
hipLaunchKernelGGL(( ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
hipLaunchKernelGGL(( ComputeSplitPropertiesGatherImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
}
| e3b27801d5cfbf41e0d196c91f49d242b2ab64bf.cu | #include "hist.cuh"
#include "hist_2_one_byte_base.cuh"
#include "tuning_policy_enums.cuh"
#include "compute_hist_loop_one_stat.cuh"
#include <cooperative_groups.h>
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int Bits,
int BlockSize>
struct TPointHistOneByte {
const int InnerHistBitsCount = Bits - 5;
float* Histogram;
static constexpr int GetHistSize() {
return BlockSize * 32;
}
static constexpr int AddPointsBatchSize() {
return TLoadSize<LoadSize()>::Size();
}
static constexpr int Unroll(ECIndexLoadType) {
#if __CUDA_ARCH__ < 700
const int NN = 2;
#else
const int NN = 4;
#endif
return NN;
}
static constexpr int GetBlockSize() {
return BlockSize;
}
static constexpr ELoadSize LoadSize() {
#if __CUDA_ARCH__ < 500
return ELoadSize::OneElement;
#else
return ELoadSize::FourElements;
// return ELoadSize::TwoElements;
#endif
}
static constexpr int BlockLoadSize(ECIndexLoadType indexLoadType) {
return TLoadSize<LoadSize()>::Size() * BlockSize * Unroll(indexLoadType);
}
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 8 >> InnerHistBitsCount;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistOneByte(float* hist) {
static_assert(Bits >= 5, "Error: this hist is for 5-8 bits");
const int histSize = 32 * BlockSize;
#pragma unroll 8
for (int i = threadIdx.x; i < histSize; i += BlockSize) {
hist[i] = 0;
}
Histogram = hist + SliceOffset();
__syncthreads();
}
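        // Add one sample: each thread walks the 4 features packed into ci; the (threadIdx.x + i) & 3
        // rotation plus the pass loop over the inner-histogram index serialize the shared-memory
        // updates so that warp lanes do not collide on the same histogram cell.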
__forceinline__ __device__ void AddPoint(ui32 ci, const float t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = (threadIdx.x + i) & 3;
int bin = (ci >> (24 - 8 * f)) & 255;
// int bin = bfe(ci, 24 - 8 * f, 8);
const float statToAdd = (bin >> Bits) == 0 ? t : 0;
const int mask = (1 << InnerHistBitsCount) - 1;
const int higherBin = (bin >> 5) & mask;
int offset = 4 * higherBin + f + ((bin & 31) << 5);
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
if (pass == higherBin) {
Histogram[offset] += statToAdd;
}
}
} else {
syncTile.sync();
Histogram[offset] += statToAdd;
}
}
}
template <int N>
__forceinline__ __device__ void AddPointsImpl(const ui32* ci, const float* t) {
auto syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = (threadIdx.x + i) & 3;
int bins[N];
float stats[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins[k] = (ci[k] >> (24 - 8 * f)) & 255;
// bins[k] = bfe(ci[k], 24 - 8 * f, 8);
stats[k] = (bins[k] >> Bits) == 0 ? t[k] : 0.0f;
}
int offsets[N];
int higherBin[N];
const int mask = (1 << InnerHistBitsCount) - 1;
#pragma unroll
for (int k = 0; k < N; ++k) {
higherBin[k] = (bins[k] >> 5) & mask;
offsets[k] = 4 * higherBin[k] + f + ((bins[k] & 31) << 5);
}
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
if (pass == higherBin[j]) {
Histogram[offsets[j]] += stats[j];
}
}
}
} else {
syncTile.sync();
#pragma unroll
for (int j = 0; j < N; ++j) {
Histogram[offsets[j]] += stats[j];
}
}
}
}
template <int N>
__forceinline__ __device__ void AddPoints(const ui32* ci, const float* t) {
const int NN = AddPointsBatchSize();
static_assert(N % NN == 0, "Error: incorrect stripe size");
#pragma unroll
for (int k = 0; k < N; k += NN) {
AddPointsImpl<NN>(ci + k, t + k);
}
}
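        // Reduce: first collapse the per-warp 1024-float slices into a single 1024-entry histogram
        // (stored just past the first warp slice), then fold the replicated sub-histograms so each
        // (feature, bin) pair is left with one value at the start of the shared buffer.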
__forceinline__ __device__ void Reduce() {
Histogram -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
//12 iterations
#pragma unroll 12
for (int i = start; i < 32 * BlockSize; i += warpHistSize) {
sum += Histogram[i];
}
Histogram[warpHistSize + start] = sum;
}
}
__syncthreads();
        //now only a single 1024-entry histogram remains
const int warpHistBlockCount = 8 >> InnerHistBitsCount;
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
float sum[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] = 0.0f;
}
if (fold < histSize) {
const int warpHistSize = 1024;
const int lowerBitsOffset = (fold & 31) << 5;
const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1);
const int blockSize = 4 * (1 << InnerHistBitsCount);
const volatile float* src = Histogram + warpHistSize + lowerBitsOffset + 4 * higherBin;
#pragma unroll
for (int block = 0; block < warpHistBlockCount; ++block) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] += src[i + block * blockSize];
}
}
}
__syncthreads();
if (fold < histSize) {
for (int i = 0; i < 4; ++i) {
Histogram[histSize * i + fold] = sum[i];
}
}
__syncthreads();
}
__forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount,
const TFeatureInBlock* features,
int fCount,
int leafId, int leafCount,
float* binSums) {
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
#pragma unroll 4
for (int fid = 0; fid < fCount; ++fid) {
TFeatureInBlock group = features[fid];
const int deviceOffset = group.GroupOffset * statCount * leafCount;
const int entriesPerLeaf = statCount * group.GroupSize;
float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize + group.FoldOffsetInGroup;
if (fold < features[fid].Folds) {
const float val = Histogram[fid * histSize + fold];
if (abs(val) > 1e-20f) {
if (blockCount > 1) {
atomicAdd(dst + fold, val);
} else {
dst[fold] = val;
}
}
}
}
}
};
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
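    // HIST2_PASS: an odd statistic count is split into one single-statistic pass (PASS with NumStats=1)
    // plus the two-statistics-per-pass kernel for the remaining even count; the boolean template
    // parameter presumably tells the paired kernel to account for the leftover statistic.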
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32* partIds,
ui32 partCount,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = partCount;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesGatherImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partIds,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (partCount) {
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
}
#undef PASS
#undef HIST2_PASS
}
/*
* Single part
*/
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* bins,
ui32 binsLineSize,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
numBlocks.x = (fCount + 3) / 4;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
bins, binsLineSize,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bits count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
void ComputeHistOneByte(int maxBins,
const TFeatureInBlock* features,
const int fCount,
const TDataPartition* parts,
const ui32 partId,
const ui32* cindex,
const int* indices,
const float* stats,
ui32 numStats,
ui32 statLineSize,
float* histograms,
TCudaStream stream) {
#define PASS(Bits, NumStats)\
const int blockSize = 384;\
dim3 numBlocks;\
numBlocks.z = NumStats;\
numBlocks.y = 1;\
const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\
const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\
const int groupCount = (fCount + 3) / 4;\
numBlocks.x = groupCount;\
numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\
using THist = TPointHistOneByte<Bits, blockSize>;\
ComputeSplitPropertiesGatherImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\
features,\
fCount,\
cindex,\
indices,\
stats, numStats, \
statLineSize,\
parts,\
partId,\
histograms);
#define HIST2_PASS(Bits)\
if (numStats % 2 != 0) {\
PASS(Bits, 1)\
ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
} else {\
ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\
}
if (maxBins <= 32) {
HIST2_PASS(5)
} else if (maxBins <= 64) {
HIST2_PASS(6)
// PASS(6, numStats)
} else if (maxBins <= 128) {
HIST2_PASS(7)
// PASS(7, numStats)
} else if (maxBins <= 255) {
PASS(8, numStats)
} else {
CB_ENSURE(false, "Unsupported bins count " << maxBins);
}
#undef PASS
#undef HIST2_PASS
}
}
|
a023d17ec9b4fe411f3ec13c59a75496e0fbc799.hip | // !!! This is a file automatically generated by hipify!!!
// @file nnconv_cudnn.cu
// @brief Convolution block CuDNN-based implementation.
// @author Andrea Vedaldi
/*
Copyright (C) 2015-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#if !defined(ENABLE_GPU) | !defined(ENABLE_CUDNN)
#error "nnconv_cudnn.hpp cannot be compiled without GPU and CUDNN support."
#endif
#include "nnconv_cudnn.hpp"
#include "cudnnhelper.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <algorithm>
using namespace vl ;
#define CHECK(x) \
{ \
cudnnError = x ; \
if (cudnnError != CUDNN_STATUS_SUCCESS) { \
error = context.setError(context.getCudaHelper().catchCudnnError(cudnnError, \
STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__))) ; \
goto done ; \
} }
/* ---------------------------------------------------------------- */
/* nnconv_forward_cudnn */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template<vl::DataType dataType>
vl::ErrorCode
vl::impl::nnconv_cudnn<dataType>::forward(Context& context,
Tensor output, double outputMult,
Tensor data, double dataMult,
Tensor filters,
Tensor biases,
int strideY, int strideX,
int padTop, int padBottom,
int padLeft, int padRight,
int dilateY, int dilateX)
{
assert(output) ;
assert(data) ;
assert(filters) ;
typedef typename DataTypeTraits<dataType>::type type ;
cudnnTensorDescriptor_t outputDesc, biasesDesc, dataDesc ;
cudnnFilterDescriptor_t filtersDesc ;
cudnnConvolutionDescriptor_t convDesc ;
bool outputDescInitialized = false ;
bool biasesDescInitialized = false ;
bool dataDescInitialized = false ;
bool filtersDescInitialized = false ;
bool convDescInitialized = false ;
void* workSpace = NULL ;
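    // Grouped convolution bookkeeping: the input depth is split into numGroups groups,
    // each convolved with its own contiguous block of filters (numFiltersPerGroup outputs per group).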
int numGroups = data.getDepth() / filters.getDepth() ;
int numFiltersPerGroup = filters.getSize() / numGroups ;
if (dilateX != 1 || dilateY != 1) return vl::VLE_Unsupported ;
if (padLeft != padRight) return vl::VLE_Unsupported ;
if (padTop != padBottom) return vl::VLE_Unsupported ;
if (filters.getHeight() > data.getHeight()) return vl::VLE_Unsupported ;
if (filters.getWidth() > data.getWidth()) return vl::VLE_Unsupported ;
cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ;
vl::ErrorCode error = vl::VLE_Success ;
cudnnHandle_t handle ;
// Get CuDNN
CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ;
    // Get tensor descriptors
CHECK(cudnnCreateTensorDescriptor(&outputDesc)) ;
outputDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(outputDesc,
DataTypeToCudnn<dataType>::id ,
output.getSize(), // sizes
numFiltersPerGroup,
output.getWidth(),
output.getHeight(),
output.getHeight()*output.getWidth()*output.getDepth(), //strides
output.getHeight()*output.getWidth(),
output.getHeight(),
1)) ;
CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ;
dataDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(dataDesc,
DataTypeToCudnn<dataType>::id,
data.getSize(),
data.getDepth() / numGroups,
data.getWidth(),
data.getHeight(),
data.getHeight()*data.getWidth()*data.getDepth(), //strides
data.getHeight()*data.getWidth(),
data.getHeight(),
1)) ;
CHECK(cudnnCreateFilterDescriptor(&filtersDesc)) ;
filtersDescInitialized = true ;
CHECK(cudnnSetFilter4dDescriptor(filtersDesc,
DataTypeToCudnn<dataType>::id,
IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA)
numFiltersPerGroup,
filters.getDepth(),
filters.getWidth(),
filters.getHeight())) ;
if (biases) {
CHECK(cudnnCreateTensorDescriptor(&biasesDesc)) ;
biasesDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptor(biasesDesc,
CUDNN_TENSOR_NCHW,
DataTypeToCudnn<dataType>::id ,
1,
biases.getNumElements() / numGroups,
1,
1)) ;
}
// Get convolution descriptor
CHECK(cudnnCreateConvolutionDescriptor(&convDesc)) ;
convDescInitialized = true ;
CHECK(cudnnSetConvolution2dDescriptor(convDesc,
padLeft, padTop,
strideX, strideY,
1,1, // upscale
CUDNN_CROSS_CORRELATION)) ;
// Sanity check
#if 1
{
int n, c, h, w ;
cudnnGetConvolution2dForwardOutputDim(convDesc,
dataDesc,
filtersDesc,
&n, &c, &w, &h) ;
bool sane =
output.getSize() == n &&
numFiltersPerGroup == c &&
output.getWidth() == w &&
output.getHeight() == h ;
assert(sane) ;
}
#endif
context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed = 0 ;
context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed = 0 ;
context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed = 0 ;
if (!context.getCudaHelper().cudnnConvolutionFwdSpecificAlgo) {
// Determine algorithm automatically
CHECK(cudnnGetConvolutionForwardAlgorithm(handle,
dataDesc,
filtersDesc,
convDesc,
outputDesc,
context.getCudaHelper().cudnnConvolutionFwdPreference,
context.getCudaHelper().cudnnConvolutionFwdWorkSpaceLimit,
&context.getCudaHelper().cudnnConvolutionFwdAlgo)) ;
}
// Get workspace size
CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle,
dataDesc,
filtersDesc,
convDesc,
outputDesc,
context.getCudaHelper().cudnnConvolutionFwdAlgo,
&context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed)) ;
// Get workspace
if (context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed > 0) {
workSpace = context.getWorkspace(vl::VLDT_GPU, context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed) ;
if (workSpace == NULL) {
error = context.getLastError() ;
goto done ;
}
}
// Perform convolution for each filter group
for (int g = 0 ; g < numGroups ; ++g) {
ptrdiff_t dataGrpOffset = (data.getHeight() * data.getWidth() * filters.getDepth()) * g ;
ptrdiff_t filtersGrpOffset = (filters.getHeight() * filters.getWidth() * filters.getDepth()) * numFiltersPerGroup * g ;
ptrdiff_t outputGrpOffset = (output.getHeight() * output.getWidth() * numFiltersPerGroup) * g ;
ptrdiff_t biasesGrpOffset = numFiltersPerGroup * g ;
type alpha = dataMult ;
type beta = outputMult ;
CHECK(cudnnConvolutionForward(handle,
&alpha,
dataDesc, (type const*)data.getMemory() + dataGrpOffset,
filtersDesc, (type const*)filters.getMemory() + filtersGrpOffset,
convDesc,
context.getCudaHelper().cudnnConvolutionFwdAlgo,
workSpace, context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed,
&beta,
outputDesc, (type*)output.getMemory() + outputGrpOffset)) ;
if (biases) {
type alpha = 1.0f ;
type beta = 1.0f ;
#if (CUDNN_VERSION < 4000)
CHECK(cudnnAddTensor(handle,
CUDNN_ADD_SAME_C,
&alpha,
biasesDesc, (type const*)biases.getMemory() + biasesGrpOffset,
&beta,
outputDesc, (type*)output.getMemory() + outputGrpOffset)) ;
#else
CHECK(cudnnAddTensor(handle,
&alpha,
biasesDesc, (type const*)biases.getMemory() + biasesGrpOffset,
&beta,
outputDesc, (type*)output.getMemory() + outputGrpOffset)) ;
#endif
}
}
/* cleanup */
done:
if (convDescInitialized) { cudnnDestroyConvolutionDescriptor(convDesc) ; }
if (filtersDescInitialized) { cudnnDestroyFilterDescriptor(filtersDesc) ; }
if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; }
if (biasesDescInitialized) { cudnnDestroyTensorDescriptor(biasesDesc) ; }
if (outputDescInitialized) { cudnnDestroyTensorDescriptor(outputDesc) ; }
return context.passError(error, __func__) ;
}
/* ---------------------------------------------------------------- */
/* nnconv_backward_cudnn */
/* ---------------------------------------------------------------- */
template<vl::DataType dataType>
vl::ErrorCode
vl::impl::nnconv_cudnn<dataType>::backward(Context& context,
Tensor derData,
Tensor derFilters,
Tensor derBiases,
Tensor data,
Tensor filters,
Tensor derOutput,
int strideY, int strideX,
int padTop, int padBottom,
int padLeft, int padRight,
int dilateY, int dilateX)
{
typedef typename DataTypeTraits<dataType>::type type ;
/* no derDataDesc needed as same as dataDesc */
cudnnTensorDescriptor_t dataDesc, derBiasesDesc, derOutputDesc ;
cudnnFilterDescriptor_t filtersDesc ;
cudnnConvolutionDescriptor_t convDesc ;
bool dataDescInitialized = false ;
bool derBiasesDescInitialized = false ;
bool derOutputDescInitialized = false ;
bool filtersDescInitialized = false ;
bool convDescInitialized = false ;
#if (CUDNN_VERSION >= 3000)
void* workSpace = NULL ;
size_t workSpaceSize = 0 ;
#endif
ptrdiff_t numGroups = 1 ;
ptrdiff_t numFiltersPerGroup = 0 ;
ptrdiff_t filtersVolume = 0 ;
if (dilateX != 1 || dilateY != 1) return vl::VLE_Unsupported ;
if (padLeft != padRight) return vl::VLE_Unsupported ;
if (padTop != padBottom) return vl::VLE_Unsupported ;
cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ;
vl::ErrorCode error = vl::VLE_Success ;
cudnnHandle_t handle ;
// Get CuDNN
CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ;
    // Get the dimensions of the tensors involved
    // If derData is specified (hence computed as output), use this
// tensor as a basis to compute such dimensions, otherwise use derFilters.
if (derData) {
assert(filters) ;
numGroups = derData.getDepth() / filters.getDepth() ;
numFiltersPerGroup = filters.getSize() / numGroups ;
filtersVolume = filters.getHeight() * filters.getWidth() * filters.getDepth() ;
CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ;
dataDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(dataDesc,
DataTypeToCudnn<dataType>::id ,
derData.getSize(),
derData.getDepth() / numGroups,
derData.getWidth(),
derData.getHeight(),
derData.getHeight()*derData.getWidth()*derData.getDepth(), //strides
derData.getHeight()*derData.getWidth(),
derData.getHeight(),
1)) ;
CHECK(cudnnCreateFilterDescriptor(&filtersDesc)) ;
filtersDescInitialized = true ;
CHECK(cudnnSetFilter4dDescriptor(filtersDesc,
DataTypeToCudnn<dataType>::id ,
IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA)
numFiltersPerGroup,
filters.getDepth(),
filters.getWidth(),
filters.getHeight())) ;
} else if (derFilters) {
assert(data) ;
numGroups = data.getDepth() / derFilters.getDepth() ;
numFiltersPerGroup = derFilters.getSize() / numGroups ;
filtersVolume = derFilters.getHeight() * derFilters.getWidth() * derFilters.getDepth() ;
CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ;
dataDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(dataDesc,
DataTypeToCudnn<dataType>::id ,
data.getSize(),
data.getDepth() / numGroups,
data.getWidth(),
data.getHeight(),
data.getHeight()*data.getWidth()*data.getDepth(), //strides
data.getHeight()*data.getWidth(),
data.getHeight(),
1)) ;
CHECK(cudnnCreateFilterDescriptor(&filtersDesc)) ;
filtersDescInitialized = true ;
CHECK(cudnnSetFilter4dDescriptor(filtersDesc,
DataTypeToCudnn<dataType>::id ,
IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA)
numFiltersPerGroup,
derFilters.getDepth(),
derFilters.getWidth(),
derFilters.getHeight())) ;
}
CHECK(cudnnCreateConvolutionDescriptor(&convDesc)) ;
convDescInitialized = true ;
CHECK(cudnnSetConvolution2dDescriptor(convDesc,
padLeft, padTop,
strideX, strideY,
1,1, // upscale
CUDNN_CROSS_CORRELATION)) ;
// Must have derOutput for all derivatives
assert(derOutput) ;
CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ;
derOutputDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(derOutputDesc,
DataTypeToCudnn<dataType>::id ,
derOutput.getSize(), // sizes
numFiltersPerGroup,
derOutput.getWidth(),
derOutput.getHeight(),
derOutput.getHeight()*derOutput.getWidth()*derOutput.getDepth(), //strides
derOutput.getHeight()*derOutput.getWidth(),
derOutput.getHeight(),
1)) ;
// for derivatives w.r.t. bias
if (derBiases) {
CHECK(cudnnCreateTensorDescriptor(&derBiasesDesc)) ;
derBiasesDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptor(derBiasesDesc,
CUDNN_TENSOR_NCHW,
DataTypeToCudnn<dataType>::id ,
1,
derBiases.getNumElements() / numGroups,
1,
1)) ;
}
context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed = 0 ;
context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed = 0 ;
context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed = 0 ;
#if (CUDNN_VERSION >= 3000)
if (derFilters) {
// Get filter derivatives algorithm
CHECK(cudnnGetConvolutionBackwardFilterAlgorithm
(handle,
dataDesc,
derOutputDesc,
convDesc,
filtersDesc,
context.getCudaHelper().cudnnConvolutionBwdFilterPreference,
context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceLimit,
&context.getCudaHelper().cudnnConvolutionBwdFilterAlgo)) ;
// Get workspace size
CHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize
(handle,
dataDesc,
derOutputDesc,
convDesc,
filtersDesc,
context.getCudaHelper().cudnnConvolutionBwdFilterAlgo,
&context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed)) ;
      workSpaceSize = std::max(workSpaceSize, context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed) ;
}
if (derData) {
// Get data derivatives
CHECK(cudnnGetConvolutionBackwardDataAlgorithm
(handle,
filtersDesc,
derOutputDesc,
convDesc,
dataDesc,
context.getCudaHelper().cudnnConvolutionBwdDataPreference,
context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceLimit,
&context.getCudaHelper().cudnnConvolutionBwdDataAlgo)) ;
// Get workspace size
CHECK(cudnnGetConvolutionBackwardDataWorkspaceSize
(handle,
filtersDesc,
derOutputDesc,
convDesc,
dataDesc,
context.getCudaHelper().cudnnConvolutionBwdDataAlgo,
&context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed)) ;
      workSpaceSize = std::max(workSpaceSize, context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed) ;
}
// Get workspace
if (workSpaceSize > 0) {
workSpace = context.getWorkspace(vl::VLDT_GPU, workSpaceSize) ;
if (workSpace == NULL) {
error = context.getLastError() ;
goto done ;
}
}
#endif
// Perform backward convolution for each filter group
for (int g = 0 ; g < numGroups ; ++g) {
ptrdiff_t filtersGrpOffset = filtersVolume * numFiltersPerGroup * g ;
ptrdiff_t derOutputGrpOffset = (derOutput.getHeight() * derOutput.getWidth() * numFiltersPerGroup) * g ;
if (derBiases) {
ptrdiff_t derBiasesGrpOffset = numFiltersPerGroup * g ;
type alpha = 1 ;
type beta = 0 ;
CHECK(cudnnConvolutionBackwardBias
(handle,
&alpha,
derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset,
&beta,
derBiasesDesc, (type*)derBiases.getMemory() + derBiasesGrpOffset)) ;
}
if (derFilters) {
ptrdiff_t dataGrpOffset = (data.getHeight() * data.getWidth() * derFilters.getDepth()) * g ;
type alpha = 1 ;
type beta = 0 ;
#if (CUDNN_VERSION >= 3000)
CHECK(
IF_CUDNN_GE4(cudnnConvolutionBackwardFilter)
IF_CUDNN_GE3_LT4(cudnnConvolutionBackwardFilter_v3)
(handle,
&alpha,
dataDesc, (type const*)data.getMemory() + dataGrpOffset,
derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset,
convDesc,
context.getCudaHelper().cudnnConvolutionBwdFilterAlgo,
workSpace, workSpaceSize,
&beta,
filtersDesc, (type*)derFilters.getMemory() + filtersGrpOffset)) ;
#else
CHECK(cudnnConvolutionBackwardFilter
(handle,
&alpha,
dataDesc, (type const*)data.getMemory() + dataGrpOffset,
derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset,
convDesc,
&beta,
filtersDesc, (type*)derFilters.getMemory() + filtersGrpOffset)) ;
#endif
}
if (derData) {
ptrdiff_t dataGrpOffset = (derData.getHeight() * derData.getWidth() * filters.getDepth()) * g ;
type alpha = 1 ;
type beta = 0 ;
#if (CUDNN_VERSION >= 3000)
CHECK(
IF_CUDNN_GE4(cudnnConvolutionBackwardData)
IF_CUDNN_GE3_LT4(cudnnConvolutionBackwardData_v3)
(handle,
&alpha,
filtersDesc, (type const*)filters.getMemory() + filtersGrpOffset,
derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset,
convDesc,
context.getCudaHelper().cudnnConvolutionBwdDataAlgo,
workSpace, workSpaceSize,
&beta,
dataDesc, (type*)derData.getMemory() + dataGrpOffset)) ;
#else
CHECK(cudnnConvolutionBackwardData
(handle,
&alpha,
filtersDesc, filters.getMemory() + filtersGrpOffset,
derOutputDesc, derOutput.getMemory() + derOutputGrpOffset,
convDesc,
&beta,
dataDesc, derData.getMemory() + dataGrpOffset)) ;
#endif
}
}
done:
if (convDescInitialized) { cudnnDestroyConvolutionDescriptor(convDesc) ; }
if (filtersDescInitialized) { cudnnDestroyFilterDescriptor(filtersDesc) ; }
if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; }
if (derBiasesDescInitialized) { cudnnDestroyTensorDescriptor(derBiasesDesc) ; }
if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; }
return context.passError(error, __func__) ;
}
} }
// Instantiations
template struct vl::impl::nnconv_cudnn<vl::VLDT_Float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::nnconv_cudnn<vl::VLDT_Double> ;
#endif
| a023d17ec9b4fe411f3ec13c59a75496e0fbc799.cu | // @file nnconv_cudnn.cu
// @brief Convolution block CuDNN-based implementation.
// @author Andrea Vedaldi
/*
Copyright (C) 2015-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#if !defined(ENABLE_GPU) | !defined(ENABLE_CUDNN)
#error "nnconv_cudnn.hpp cannot be compiled without GPU and CUDNN support."
#endif
#include "nnconv_cudnn.hpp"
#include "cudnnhelper.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <algorithm>
using namespace vl ;
#define CHECK(x) \
{ \
cudnnError = x ; \
if (cudnnError != CUDNN_STATUS_SUCCESS) { \
error = context.setError(context.getCudaHelper().catchCudnnError(cudnnError, \
STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__))) ; \
goto done ; \
} }
/* ---------------------------------------------------------------- */
/* nnconv_forward_cudnn */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template<vl::DataType dataType>
vl::ErrorCode
vl::impl::nnconv_cudnn<dataType>::forward(Context& context,
Tensor output, double outputMult,
Tensor data, double dataMult,
Tensor filters,
Tensor biases,
int strideY, int strideX,
int padTop, int padBottom,
int padLeft, int padRight,
int dilateY, int dilateX)
{
assert(output) ;
assert(data) ;
assert(filters) ;
typedef typename DataTypeTraits<dataType>::type type ;
cudnnTensorDescriptor_t outputDesc, biasesDesc, dataDesc ;
cudnnFilterDescriptor_t filtersDesc ;
cudnnConvolutionDescriptor_t convDesc ;
bool outputDescInitialized = false ;
bool biasesDescInitialized = false ;
bool dataDescInitialized = false ;
bool filtersDescInitialized = false ;
bool convDescInitialized = false ;
void* workSpace = NULL ;
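    // Grouped convolution bookkeeping: the input depth is split into numGroups groups,
    // each convolved with its own contiguous block of filters (numFiltersPerGroup outputs per group).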
int numGroups = data.getDepth() / filters.getDepth() ;
int numFiltersPerGroup = filters.getSize() / numGroups ;
if (dilateX != 1 || dilateY != 1) return vl::VLE_Unsupported ;
if (padLeft != padRight) return vl::VLE_Unsupported ;
if (padTop != padBottom) return vl::VLE_Unsupported ;
if (filters.getHeight() > data.getHeight()) return vl::VLE_Unsupported ;
if (filters.getWidth() > data.getWidth()) return vl::VLE_Unsupported ;
cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ;
vl::ErrorCode error = vl::VLE_Success ;
cudnnHandle_t handle ;
// Get CuDNN
CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ;
    // Get tensor descriptors
CHECK(cudnnCreateTensorDescriptor(&outputDesc)) ;
outputDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(outputDesc,
DataTypeToCudnn<dataType>::id ,
output.getSize(), // sizes
numFiltersPerGroup,
output.getWidth(),
output.getHeight(),
output.getHeight()*output.getWidth()*output.getDepth(), //strides
output.getHeight()*output.getWidth(),
output.getHeight(),
1)) ;
CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ;
dataDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(dataDesc,
DataTypeToCudnn<dataType>::id,
data.getSize(),
data.getDepth() / numGroups,
data.getWidth(),
data.getHeight(),
data.getHeight()*data.getWidth()*data.getDepth(), //strides
data.getHeight()*data.getWidth(),
data.getHeight(),
1)) ;
CHECK(cudnnCreateFilterDescriptor(&filtersDesc)) ;
filtersDescInitialized = true ;
CHECK(cudnnSetFilter4dDescriptor(filtersDesc,
DataTypeToCudnn<dataType>::id,
IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA)
numFiltersPerGroup,
filters.getDepth(),
filters.getWidth(),
filters.getHeight())) ;
if (biases) {
CHECK(cudnnCreateTensorDescriptor(&biasesDesc)) ;
biasesDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptor(biasesDesc,
CUDNN_TENSOR_NCHW,
DataTypeToCudnn<dataType>::id ,
1,
biases.getNumElements() / numGroups,
1,
1)) ;
}
// Get convolution descriptor
CHECK(cudnnCreateConvolutionDescriptor(&convDesc)) ;
convDescInitialized = true ;
CHECK(cudnnSetConvolution2dDescriptor(convDesc,
padLeft, padTop,
strideX, strideY,
1,1, // upscale
CUDNN_CROSS_CORRELATION)) ;
// Sanity check
#if 1
{
int n, c, h, w ;
cudnnGetConvolution2dForwardOutputDim(convDesc,
dataDesc,
filtersDesc,
&n, &c, &w, &h) ;
bool sane =
output.getSize() == n &&
numFiltersPerGroup == c &&
output.getWidth() == w &&
output.getHeight() == h ;
assert(sane) ;
}
#endif
context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed = 0 ;
context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed = 0 ;
context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed = 0 ;
if (!context.getCudaHelper().cudnnConvolutionFwdSpecificAlgo) {
// Determine algorithm automatically
CHECK(cudnnGetConvolutionForwardAlgorithm(handle,
dataDesc,
filtersDesc,
convDesc,
outputDesc,
context.getCudaHelper().cudnnConvolutionFwdPreference,
context.getCudaHelper().cudnnConvolutionFwdWorkSpaceLimit,
&context.getCudaHelper().cudnnConvolutionFwdAlgo)) ;
}
// Get workspace size
CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle,
dataDesc,
filtersDesc,
convDesc,
outputDesc,
context.getCudaHelper().cudnnConvolutionFwdAlgo,
&context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed)) ;
// Get workspace
if (context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed > 0) {
workSpace = context.getWorkspace(vl::VLDT_GPU, context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed) ;
if (workSpace == NULL) {
error = context.getLastError() ;
goto done ;
}
}
// Perform convolution for each filter group
for (int g = 0 ; g < numGroups ; ++g) {
ptrdiff_t dataGrpOffset = (data.getHeight() * data.getWidth() * filters.getDepth()) * g ;
ptrdiff_t filtersGrpOffset = (filters.getHeight() * filters.getWidth() * filters.getDepth()) * numFiltersPerGroup * g ;
ptrdiff_t outputGrpOffset = (output.getHeight() * output.getWidth() * numFiltersPerGroup) * g ;
ptrdiff_t biasesGrpOffset = numFiltersPerGroup * g ;
type alpha = dataMult ;
type beta = outputMult ;
CHECK(cudnnConvolutionForward(handle,
&alpha,
dataDesc, (type const*)data.getMemory() + dataGrpOffset,
filtersDesc, (type const*)filters.getMemory() + filtersGrpOffset,
convDesc,
context.getCudaHelper().cudnnConvolutionFwdAlgo,
workSpace, context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed,
&beta,
outputDesc, (type*)output.getMemory() + outputGrpOffset)) ;
if (biases) {
type alpha = 1.0f ;
type beta = 1.0f ;
#if (CUDNN_VERSION < 4000)
CHECK(cudnnAddTensor(handle,
CUDNN_ADD_SAME_C,
&alpha,
biasesDesc, (type const*)biases.getMemory() + biasesGrpOffset,
&beta,
outputDesc, (type*)output.getMemory() + outputGrpOffset)) ;
#else
CHECK(cudnnAddTensor(handle,
&alpha,
biasesDesc, (type const*)biases.getMemory() + biasesGrpOffset,
&beta,
outputDesc, (type*)output.getMemory() + outputGrpOffset)) ;
#endif
}
}
/* cleanup */
done:
if (convDescInitialized) { cudnnDestroyConvolutionDescriptor(convDesc) ; }
if (filtersDescInitialized) { cudnnDestroyFilterDescriptor(filtersDesc) ; }
if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; }
if (biasesDescInitialized) { cudnnDestroyTensorDescriptor(biasesDesc) ; }
if (outputDescInitialized) { cudnnDestroyTensorDescriptor(outputDesc) ; }
return context.passError(error, __func__) ;
}
/* ---------------------------------------------------------------- */
/* nnconv_backward_cudnn */
/* ---------------------------------------------------------------- */
template<vl::DataType dataType>
vl::ErrorCode
vl::impl::nnconv_cudnn<dataType>::backward(Context& context,
Tensor derData,
Tensor derFilters,
Tensor derBiases,
Tensor data,
Tensor filters,
Tensor derOutput,
int strideY, int strideX,
int padTop, int padBottom,
int padLeft, int padRight,
int dilateY, int dilateX)
{
typedef typename DataTypeTraits<dataType>::type type ;
/* no derDataDesc needed as same as dataDesc */
cudnnTensorDescriptor_t dataDesc, derBiasesDesc, derOutputDesc ;
cudnnFilterDescriptor_t filtersDesc ;
cudnnConvolutionDescriptor_t convDesc ;
bool dataDescInitialized = false ;
bool derBiasesDescInitialized = false ;
bool derOutputDescInitialized = false ;
bool filtersDescInitialized = false ;
bool convDescInitialized = false ;
#if (CUDNN_VERSION >= 3000)
void* workSpace = NULL ;
size_t workSpaceSize = 0 ;
#endif
ptrdiff_t numGroups = 1 ;
ptrdiff_t numFiltersPerGroup = 0 ;
ptrdiff_t filtersVolume = 0 ;
if (dilateX != 1 || dilateY != 1) return vl::VLE_Unsupported ;
if (padLeft != padRight) return vl::VLE_Unsupported ;
if (padTop != padBottom) return vl::VLE_Unsupported ;
cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ;
vl::ErrorCode error = vl::VLE_Success ;
cudnnHandle_t handle ;
// Get CuDNN
CHECK(context.getCudaHelper().getCudnnHandle(&handle)) ;
    // Get the dimensions of the tensors involved
    // If derData is specified (hence computed as output), use this
// tensor as a basis to compute such dimensions, otherwise use derFilters.
if (derData) {
assert(filters) ;
numGroups = derData.getDepth() / filters.getDepth() ;
numFiltersPerGroup = filters.getSize() / numGroups ;
filtersVolume = filters.getHeight() * filters.getWidth() * filters.getDepth() ;
CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ;
dataDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(dataDesc,
DataTypeToCudnn<dataType>::id ,
derData.getSize(),
derData.getDepth() / numGroups,
derData.getWidth(),
derData.getHeight(),
derData.getHeight()*derData.getWidth()*derData.getDepth(), //strides
derData.getHeight()*derData.getWidth(),
derData.getHeight(),
1)) ;
CHECK(cudnnCreateFilterDescriptor(&filtersDesc)) ;
filtersDescInitialized = true ;
CHECK(cudnnSetFilter4dDescriptor(filtersDesc,
DataTypeToCudnn<dataType>::id ,
IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA)
numFiltersPerGroup,
filters.getDepth(),
filters.getWidth(),
filters.getHeight())) ;
} else if (derFilters) {
assert(data) ;
numGroups = data.getDepth() / derFilters.getDepth() ;
numFiltersPerGroup = derFilters.getSize() / numGroups ;
filtersVolume = derFilters.getHeight() * derFilters.getWidth() * derFilters.getDepth() ;
CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ;
dataDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(dataDesc,
DataTypeToCudnn<dataType>::id ,
data.getSize(),
data.getDepth() / numGroups,
data.getWidth(),
data.getHeight(),
data.getHeight()*data.getWidth()*data.getDepth(), //strides
data.getHeight()*data.getWidth(),
data.getHeight(),
1)) ;
CHECK(cudnnCreateFilterDescriptor(&filtersDesc)) ;
filtersDescInitialized = true ;
CHECK(cudnnSetFilter4dDescriptor(filtersDesc,
DataTypeToCudnn<dataType>::id ,
IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA)
numFiltersPerGroup,
derFilters.getDepth(),
derFilters.getWidth(),
derFilters.getHeight())) ;
}
CHECK(cudnnCreateConvolutionDescriptor(&convDesc)) ;
convDescInitialized = true ;
CHECK(cudnnSetConvolution2dDescriptor(convDesc,
padLeft, padTop,
strideX, strideY,
1,1, // upscale
CUDNN_CROSS_CORRELATION)) ;
// Must have derOutput for all derivatives
assert(derOutput) ;
CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ;
derOutputDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptorEx(derOutputDesc,
DataTypeToCudnn<dataType>::id ,
derOutput.getSize(), // sizes
numFiltersPerGroup,
derOutput.getWidth(),
derOutput.getHeight(),
derOutput.getHeight()*derOutput.getWidth()*derOutput.getDepth(), //strides
derOutput.getHeight()*derOutput.getWidth(),
derOutput.getHeight(),
1)) ;
// for derivatives w.r.t. bias
if (derBiases) {
CHECK(cudnnCreateTensorDescriptor(&derBiasesDesc)) ;
derBiasesDescInitialized = true ;
CHECK(cudnnSetTensor4dDescriptor(derBiasesDesc,
CUDNN_TENSOR_NCHW,
DataTypeToCudnn<dataType>::id ,
1,
derBiases.getNumElements() / numGroups,
1,
1)) ;
}
context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed = 0 ;
context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed = 0 ;
context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed = 0 ;
#if (CUDNN_VERSION >= 3000)
if (derFilters) {
// Get filter derivatives algorithm
CHECK(cudnnGetConvolutionBackwardFilterAlgorithm
(handle,
dataDesc,
derOutputDesc,
convDesc,
filtersDesc,
context.getCudaHelper().cudnnConvolutionBwdFilterPreference,
context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceLimit,
&context.getCudaHelper().cudnnConvolutionBwdFilterAlgo)) ;
// Get workspace size
CHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize
(handle,
dataDesc,
derOutputDesc,
convDesc,
filtersDesc,
context.getCudaHelper().cudnnConvolutionBwdFilterAlgo,
&context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed)) ;
workSpaceSize = std::max(workSpaceSize, context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed) ;
}
if (derData) {
// Get data derivatives
CHECK(cudnnGetConvolutionBackwardDataAlgorithm
(handle,
filtersDesc,
derOutputDesc,
convDesc,
dataDesc,
context.getCudaHelper().cudnnConvolutionBwdDataPreference,
context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceLimit,
&context.getCudaHelper().cudnnConvolutionBwdDataAlgo)) ;
// Get workspace size
CHECK(cudnnGetConvolutionBackwardDataWorkspaceSize
(handle,
filtersDesc,
derOutputDesc,
convDesc,
dataDesc,
context.getCudaHelper().cudnnConvolutionBwdDataAlgo,
&context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed)) ;
workSpaceSize = std::max(workSpaceSize, context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed) ;
}
// Get workspace
if (workSpaceSize > 0) {
workSpace = context.getWorkspace(vl::VLDT_GPU, workSpaceSize) ;
if (workSpace == NULL) {
error = context.getLastError() ;
goto done ;
}
}
#endif
// Perform backward convolution for each filter group
for (int g = 0 ; g < numGroups ; ++g) {
ptrdiff_t filtersGrpOffset = filtersVolume * numFiltersPerGroup * g ;
ptrdiff_t derOutputGrpOffset = (derOutput.getHeight() * derOutput.getWidth() * numFiltersPerGroup) * g ;
if (derBiases) {
ptrdiff_t derBiasesGrpOffset = numFiltersPerGroup * g ;
type alpha = 1 ;
type beta = 0 ;
CHECK(cudnnConvolutionBackwardBias
(handle,
&alpha,
derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset,
&beta,
derBiasesDesc, (type*)derBiases.getMemory() + derBiasesGrpOffset)) ;
}
if (derFilters) {
ptrdiff_t dataGrpOffset = (data.getHeight() * data.getWidth() * derFilters.getDepth()) * g ;
type alpha = 1 ;
type beta = 0 ;
#if (CUDNN_VERSION >= 3000)
CHECK(
IF_CUDNN_GE4(cudnnConvolutionBackwardFilter)
IF_CUDNN_GE3_LT4(cudnnConvolutionBackwardFilter_v3)
(handle,
&alpha,
dataDesc, (type const*)data.getMemory() + dataGrpOffset,
derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset,
convDesc,
context.getCudaHelper().cudnnConvolutionBwdFilterAlgo,
workSpace, workSpaceSize,
&beta,
filtersDesc, (type*)derFilters.getMemory() + filtersGrpOffset)) ;
#else
CHECK(cudnnConvolutionBackwardFilter
(handle,
&alpha,
dataDesc, (type const*)data.getMemory() + dataGrpOffset,
derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset,
convDesc,
&beta,
filtersDesc, (type*)derFilters.getMemory() + filtersGrpOffset)) ;
#endif
}
if (derData) {
ptrdiff_t dataGrpOffset = (derData.getHeight() * derData.getWidth() * filters.getDepth()) * g ;
type alpha = 1 ;
type beta = 0 ;
#if (CUDNN_VERSION >= 3000)
CHECK(
IF_CUDNN_GE4(cudnnConvolutionBackwardData)
IF_CUDNN_GE3_LT4(cudnnConvolutionBackwardData_v3)
(handle,
&alpha,
filtersDesc, (type const*)filters.getMemory() + filtersGrpOffset,
derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset,
convDesc,
context.getCudaHelper().cudnnConvolutionBwdDataAlgo,
workSpace, workSpaceSize,
&beta,
dataDesc, (type*)derData.getMemory() + dataGrpOffset)) ;
#else
CHECK(cudnnConvolutionBackwardData
(handle,
&alpha,
filtersDesc, filters.getMemory() + filtersGrpOffset,
derOutputDesc, derOutput.getMemory() + derOutputGrpOffset,
convDesc,
&beta,
dataDesc, derData.getMemory() + dataGrpOffset)) ;
#endif
}
}
done:
if (convDescInitialized) { cudnnDestroyConvolutionDescriptor(convDesc) ; }
if (filtersDescInitialized) { cudnnDestroyFilterDescriptor(filtersDesc) ; }
if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; }
if (derBiasesDescInitialized) { cudnnDestroyTensorDescriptor(derBiasesDesc) ; }
if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; }
return context.passError(error, __func__) ;
}
} }
// Instantiations
template struct vl::impl::nnconv_cudnn<vl::VLDT_Float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::nnconv_cudnn<vl::VLDT_Double> ;
#endif
|
67e1e1ea5e8fe71d01b180638e19bc3015deb72e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Tools.hpp"
__global__ static void newtonAnimation(uchar4* ptrDevPixels, int w, int h, DomaineMaths domainNew);
__device__ static int newton(float x, float y);
void launchNewtonAnimation(uchar4* ptrDevPixels, int w, int h, const DomaineMaths& domainNew){
dim3 blockPerGrid = dim3(32, 16, 1);
dim3 threadPerBlock = dim3(32, 16, 1);
hipLaunchKernelGGL(( newtonAnimation), dim3(blockPerGrid),dim3(threadPerBlock), 0, 0, ptrDevPixels, w, h, domainNew);
}
__global__ static void newtonAnimation(uchar4* ptrDevPixels, int w, int h, DomaineMaths domainNew){
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int nbThreadY = gridDim.y * blockDim.y;
int nbThreadX = gridDim.x * blockDim.x;
int nbThreadCuda = nbThreadY * nbThreadX;
float dx = (float) (domainNew.dx / (float) w);
float dy = (float) (domainNew.dy / (float) h);
int tid = j + (i * nbThreadX);
float x, y;
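// Grid-stride loop: each thread handles pixels tid, tid + nbThreadCuda,
// tid + 2*nbThreadCuda, ... so the whole w x h image is covered even when
// fewer threads than pixels are launched.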
while(tid < (w * h)){
int pixelI = tid / w;
int pixelJ = tid - w * pixelI;
x = domainNew.x0 + pixelJ * dx;
y = domainNew.y0 + pixelI * dy;
int color = newton(x, y);
if(color == 0){
ptrDevPixels[tid].x = 0;
ptrDevPixels[tid].y = 0;
ptrDevPixels[tid].z = 0;
} else if(color == 1){
ptrDevPixels[tid].x = 255;
ptrDevPixels[tid].y = 0;
ptrDevPixels[tid].z = 0;
} else if(color == 2){
ptrDevPixels[tid].x = 0;
ptrDevPixels[tid].y = 255;
ptrDevPixels[tid].z = 0;
} else if(color == 3){
ptrDevPixels[tid].x = 0;
ptrDevPixels[tid].y = 0;
ptrDevPixels[tid].z = 255;
}
ptrDevPixels[tid].w = 255;
tid += nbThreadCuda;
}
}
struct vector{
float x;
float y;
};
#define LIMIT 1000
#define PRECISION 1
#define CIRCLE 3
#define SQRT3 1.7320508075688772935
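// The three roots of z^3 = 1 are z = 1 and z = -1/2 +/- (sqrt(3)/2)i; the near()
// checks in newton() below test which of these roots the iteration has settled
// on (a few consecutive hits on the same root, counted against CIRCLE).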
__device__ static bool near(float src, float target){
float delta = src - target;
if(delta < 0){
delta = -delta;
}
if(delta <= PRECISION){
return true;
}
return false;
}
__device__ static int newton(float x, float y){
vector xn = {x,y};
int current = 0;
int times = 0;
int last = 0;
while(current < LIMIT){
float fnx = xn.x * xn.x * xn.x - 3 * xn.x * xn.y * xn.y - 1;
float fny = xn.y * xn.y * xn.y - 3 * xn.x * xn.x * xn.y;
float ja = 3 * xn.x * xn.x - 3 * xn.y * xn.y;
float jd = 3 * xn.y * xn.y - 3 * xn.x * xn.x;
float jbc = 6 * xn.x * xn.y;
float det = ja * jd - jbc * jbc; //det(A) = a*d - b*c
float dx = (jd / det) * fnx + (jbc / det) * fny;
float dy = (jbc / det) * fnx + (ja / det) * fny;
xn.x = xn.x - dx;
xn.y = xn.y - dy;
if(near(xn.x, 1) && near(xn.y, 0)){
if(times == CIRCLE && last == 1){
return 1;
}
if(last == 1){
++times;
} else {
times = 1;
}
last = 1;
} else if(near(xn.x, -0.5f) && near(xn.y, SQRT3 / 2)){
if(times == CIRCLE && last == 2){
return 2;
}
if(last == 2){
++times;
} else {
times = 1;
}
last = 2;
} else if(near(xn.x, -0.5f) && near(xn.y, -SQRT3 / 2)){
if(times == CIRCLE && last == 3){
return 3;
}
if(last == 3){
++times;
} else {
times = 1;
}
last = 3;
} else {
times = 0;
last = 0;
}
++current;
}
//Once we are here, it means that we are out of the loop: black point
return 0;
}
| 67e1e1ea5e8fe71d01b180638e19bc3015deb72e.cu | #include "Tools.hpp"
__global__ static void newtonAnimation(uchar4* ptrDevPixels, int w, int h, DomaineMaths domainNew);
__device__ static int newton(float x, float y);
void launchNewtonAnimation(uchar4* ptrDevPixels, int w, int h, const DomaineMaths& domainNew){
dim3 blockPerGrid = dim3(32, 16, 1);
dim3 threadPerBlock = dim3(32, 16, 1);
newtonAnimation<<<blockPerGrid,threadPerBlock>>>(ptrDevPixels, w, h, domainNew);
}
__global__ static void newtonAnimation(uchar4* ptrDevPixels, int w, int h, DomaineMaths domainNew){
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int nbThreadY = gridDim.y * blockDim.y;
int nbThreadX = gridDim.x * blockDim.x;
int nbThreadCuda = nbThreadY * nbThreadX;
float dx = (float) (domainNew.dx / (float) w);
float dy = (float) (domainNew.dy / (float) h);
int tid = j + (i * nbThreadX);
float x, y;
while(tid < (w * h)){
int pixelI = tid / w;
int pixelJ = tid - w * pixelI;
x = domainNew.x0 + pixelJ * dx;
y = domainNew.y0 + pixelI * dy;
int color = newton(x, y);
if(color == 0){
ptrDevPixels[tid].x = 0;
ptrDevPixels[tid].y = 0;
ptrDevPixels[tid].z = 0;
} else if(color == 1){
ptrDevPixels[tid].x = 255;
ptrDevPixels[tid].y = 0;
ptrDevPixels[tid].z = 0;
} else if(color == 2){
ptrDevPixels[tid].x = 0;
ptrDevPixels[tid].y = 255;
ptrDevPixels[tid].z = 0;
} else if(color == 3){
ptrDevPixels[tid].x = 0;
ptrDevPixels[tid].y = 0;
ptrDevPixels[tid].z = 255;
}
ptrDevPixels[tid].w = 255;
tid += nbThreadCuda;
}
}
struct vector{
float x;
float y;
};
#define LIMIT 1000
#define PRECISION 1
#define CIRCLE 3
#define SQRT3 1.7320508075688772935
__device__ static bool near(float src, float target){
float delta = src - target;
if(delta < 0){
delta = -delta;
}
if(delta <= PRECISION){
return true;
}
return false;
}
__device__ static int newton(float x, float y){
vector xn = {x,y};
int current = 0;
int times = 0;
int last = 0;
while(current < LIMIT){
float fnx = xn.x * xn.x * xn.x - 3 * xn.x * xn.y * xn.y - 1;
float fny = xn.y * xn.y * xn.y - 3 * xn.x * xn.x * xn.y;
float ja = 3 * xn.x * xn.x - 3 * xn.y * xn.y;
float jd = 3 * xn.y * xn.y - 3 * xn.x * xn.x;
float jbc = 6 * xn.x * xn.y;
float det = ja * jd - jbc * jbc; //det(A) = a*d - b*c
float dx = (jd / det) * fnx + (jbc / det) * fny;
float dy = (jbc / det) * fnx + (ja / det) * fny;
xn.x = xn.x - dx;
xn.y = xn.y - dy;
if(near(xn.x, 1) && near(xn.y, 0)){
if(times == CIRCLE && last == 1){
return 1;
}
if(last == 1){
++times;
} else {
times = 1;
}
last = 1;
} else if(near(xn.x, -0.5f) && near(xn.y, SQRT3 / 2)){
if(times == CIRCLE && last == 2){
return 2;
}
if(last == 2){
++times;
} else {
times = 1;
}
last = 2;
} else if(near(xn.x, -0.5f) && near(xn.y, -SQRT3 / 2)){
if(times == CIRCLE && last == 3){
return 3;
}
if(last == 3){
++times;
} else {
times = 1;
}
last = 3;
} else {
times = 0;
last = 0;
}
++current;
}
//Once we are here, it means that we are out of the loop: black point
return 0;
}
|
cb34e109c5bd46315b19320943c4c6848efac029.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
by Didem Unat
3D 7-point jacobi
Written to be used as an input program to mint translator
See the alloc3D function, which allocates contiguous memory space for
the array.
*/
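// The update applied each sweep (see the GPU kernel below) is the 7-point stencil
// Unew[z][y][x] = c0*Uold[z][y][x]
//              + c1*(Uold[z][y][x-1] + Uold[z][y][x+1]
//                  + Uold[z][y-1][x] + Uold[z][y+1][x]
//                  + Uold[z-1][y][x] + Uold[z+1][y][x])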
//#include "common.h"
#include <stdio.h>
#include <math.h>
//#include <omp.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/time.h>
#define REAL double
#define FLOPS 8
#define chunk 64
const double kMicro = 1.0e-6;
double ***alloc3D(int n,int m,int k)
{
double ***m_buffer = NULL;
// double ***m_buffer = ((void *)0);
int nx = n;
int ny = m;
int nk = k;
m_buffer = ((double ***)(malloc(sizeof(double **) * nk)));
m_buffer?((void )0) : ((__assert_fail("m_buffer","heat3D.c",32,__PRETTY_FUNCTION__) , ((void )0)));
double **m_tempzy = (double **)(malloc(sizeof(double *) * nk * ny));
double *m_tempzyx = (double *)(malloc(sizeof(double ) * nx * ny * nk));
int z;
int y;
for (z = 0; z < nk; (z++ , m_tempzy += ny)) {
m_buffer[z] = m_tempzy;
for (y = 0; y < ny; (y++ , m_tempzyx += nx)) {
m_buffer[z][y] = m_tempzyx;
}
}
return m_buffer;
}
double getTime()
{
struct timeval TV;
// const int RC = gettimeofday(&TV,((void *)0));
const int RC = gettimeofday(&TV, NULL);
if (RC == -1) {
printf("ERROR: Bad call to gettimeofday\n");
return (-1);
}
return ((double )TV . tv_sec) + kMicro * ((double )TV . tv_usec);
// end getTime()
}
//allocate 3D array
double ***alloc3D_(int n,int m,int k)
{
// double ***E = ((void *)0);
double ***E = NULL;
int nx = n;
int ny = m;
int nk = k;
E = ((double ***)(malloc(sizeof(double **) * nk)));
E?((void )0) : ((__assert_fail("E","heat3D.c",71,__PRETTY_FUNCTION__) , ((void )0)));
E[0] = ((double **)(malloc(sizeof(double *) * nk * ny)));
E[0][0] = ((double *)(malloc(sizeof(double ) * nx * ny * nk)));
int jj;
int kk;
for (kk = 0; kk < nk; kk++) {
if (kk > 0) {
E[kk] = E[kk - 1] + ny;
E[kk][0] = E[kk - 1][0] + ny * nx;
}
for (jj = 1; jj < ny; jj++) {
E[kk][jj] = E[kk][jj - 1] + nx;
}
}
return E;
}
void free3D(double ***E)
{
//int k=0;
/* for(k=0 ; k < m ; k++)
{
free(E[k]);
}*/
free(E[0][0]);
free(E[0]);
free(E);
}
void init(double ***E,int N,int M,int K)
{
int i;
int j;
int k;
for (k = 0; k < K; k++)
for (i = 0; i < M; i++)
for (j = 0; j < N; j++) {
E[k][i][j] = 1.0;
if (i == 0 || i == M - 1 || j == 0 || j == N - 1 || k == 0 || k == K - 1) {
E[k][i][j] = 0.0;
}
}
}
//calculate l2norm for comparison
void calculatel2Norm(double ***E,int N,int M,int K,int nIters)
{
int i;
int j;
int k = 0;
float mx = (-1);
float l2norm = 0;
for (k = 1; k <= K; k++) {
for (j = 1; j <= M; j++) {
for (i = 1; i <= N; i++) {
l2norm += E[k][j][i] * E[k][j][i];
if (E[k][j][i] > mx) {
mx = E[k][j][i];
}
}
}
}
l2norm /= ((float )(N * M * K));
l2norm = (sqrt(l2norm));
printf(":N %d M %d K %d , iteration %d\n",N,M,K,nIters);
printf(":max: %20.12e, l2norm: %20.12e\n",mx,l2norm);
}
__global__ static void mint_1_1527(int n,int m,int k,double c0,double c1,hipPitchedPtr dev_2_Unew,hipPitchedPtr dev_1_Uold,int num2blockDim_1_1527,float invYnumblockDim_1_1527);
int main(int argc,char *argv[])
{
int n = 256;
int m = 256;
int k = 256;
double c0 = 0.5;
double c1 = -0.25;
double ***Unew;
double ***Uold;
Unew = alloc3D(n + 2,m + 2,k + 2);
Uold = alloc3D(n + 2,m + 2,k + 2);
init(Unew,n + 2,m + 2,k + 2);
init(Uold,n + 2,m + 2,k + 2);
int T = 20;
printf("\n=====Timings (sec) for 7-Point Jacobi, Solving Heat Eqn ");
if (sizeof(double ) == 4) {
printf(" (Single Precision) =====\n");
}
if (sizeof(double ) == 8) {
printf(" (Double Precision) =====\n");
}
printf("Kernel\t Time(sec)\tGflops \tBW-ideal(GB/s)\tBW-algorithm (N=(%d,%d) iters=%d)\n",n,n,T);
printf("------\t----------\t--------\t--------------\t------------\n");
int nIters = 0;
double time_elapsed;
double Gflops = 0.0;
/* Mint: Replaced Pragma: #pragma mint copy( Uold, toDevice,( n+2 ),( m+2 ),( k+2 ) ) */
hipError_t stat_dev_1_Uold;
hipExtent ext_dev_1_Uold = make_hipExtent(((n+2)) * sizeof(double ),((m+2)),((k+2)));
/* Mint: Malloc on the device */
hipPitchedPtr dev_1_Uold;
stat_dev_1_Uold = hipMalloc3D(&dev_1_Uold,ext_dev_1_Uold);
if (stat_dev_1_Uold != hipSuccess)
fprintf(stderr,"%s\n",hipGetErrorString(stat_dev_1_Uold));
/* Mint: Copy host to device */
hipMemcpy3DParms param_1_dev_1_Uold = {0};
param_1_dev_1_Uold . srcPtr = make_hipPitchedPtr(((void *)Uold[0][0]),((n+2)) * sizeof(double ),((n+2)),((m+2)));
param_1_dev_1_Uold . dstPtr = dev_1_Uold;
param_1_dev_1_Uold . extent = ext_dev_1_Uold;
param_1_dev_1_Uold . kind = hipMemcpyHostToDevice;
stat_dev_1_Uold = hipMemcpy3D(¶m_1_dev_1_Uold);
if (stat_dev_1_Uold != hipSuccess)
fprintf(stderr,"%s\n",hipGetErrorString(stat_dev_1_Uold));
/* Mint: Replaced Pragma: #pragma mint copy( Unew, toDevice,( n+2 ), m+2,( k+2 ) ) */
hipError_t stat_dev_2_Unew;
hipExtent ext_dev_2_Unew = make_hipExtent(((n+2)) * sizeof(double ),(m+2),((k+2)));
/* Mint: Malloc on the device */
hipPitchedPtr dev_2_Unew;
stat_dev_2_Unew = hipMalloc3D(&dev_2_Unew,ext_dev_2_Unew);
if (stat_dev_2_Unew != hipSuccess)
fprintf(stderr,"%s\n",hipGetErrorString(stat_dev_2_Unew));
/* Mint: Copy host to device */
hipMemcpy3DParms param_2_dev_2_Unew = {0};
param_2_dev_2_Unew . srcPtr = make_hipPitchedPtr(((void *)Unew[0][0]),((n+2)) * sizeof(double ),((n+2)),(m+2));
param_2_dev_2_Unew . dstPtr = dev_2_Unew;
param_2_dev_2_Unew . extent = ext_dev_2_Unew;
param_2_dev_2_Unew . kind = hipMemcpyHostToDevice;
stat_dev_2_Unew = hipMemcpy3D(¶m_2_dev_2_Unew);
if (stat_dev_2_Unew != hipSuccess)
fprintf(stderr,"%s\n",hipGetErrorString(stat_dev_2_Unew));
{
time_elapsed = getTime();
int t = 0;
while(t < T){
t++;
int x;
int y;
int z;
//7-point stencil
#pragma mint for nest(all) tile(16,16,16) chunksize(1,1,16)
int num3blockDim_1_1527 = (k - 1 + 1) % 16 == 0?(k - 1 + 1) / 16 : (k - 1 + 1) / 16 + 1;
int num2blockDim_1_1527 = (m - 1 + 1) % 16 == 0?(m - 1 + 1) / 16 : (m - 1 + 1) / 16 + 1;
int num1blockDim_1_1527 = (n - 1 + 1) % 16 == 0?(n - 1 + 1) / 16 : (n - 1 + 1) / 16 + 1;
float invYnumblockDim_1_1527 = 1.00000F / num2blockDim_1_1527;
dim3 blockDim_1_1527(16,16,1);
dim3 gridDim_1_1527(num1blockDim_1_1527,num2blockDim_1_1527*num3blockDim_1_1527);
hipLaunchKernelGGL(( mint_1_1527), dim3(gridDim_1_1527),dim3(blockDim_1_1527), 0, 0, n,m,k,c0,c1,dev_2_Unew,dev_1_Uold,num2blockDim_1_1527,invYnumblockDim_1_1527);
hipDeviceSynchronize();
hipError_t err_mint_1_1527 = hipGetLastError();
if (err_mint_1_1527) {
fprintf(stderr,"In %s, %s\n","mint_1_1527",hipGetErrorString(err_mint_1_1527));
}
#pragma mint single
{
double ***tmp;
void *dev_tmp;
dev_tmp = dev_1_Uold . ptr;
dev_1_Uold . ptr = dev_2_Unew . ptr;
dev_2_Unew . ptr = dev_tmp;
nIters = t;
}
//end of while
}
//end of parallel region
}
hipFree(dev_2_Unew . ptr);
hipFree(dev_1_Uold . ptr);
#pragma mint copy(Uold, fromDevice, (n+2), (m+2), (k+2))
time_elapsed = getTime() - time_elapsed;
Gflops = ((double )((nIters * n * m * k) * 1.0e-9 * 8)) / time_elapsed;
printf("%s%3.3f \t%5.3f\n","Heat3D ",time_elapsed,Gflops);
calculatel2Norm(Uold,n,m,k,T);
free3D(Uold);
free3D(Unew);
return 0;
}
__global__ static void mint_1_1527(int n,int m,int k,double c0,double c1,hipPitchedPtr dev_2_Unew,hipPitchedPtr dev_1_Uold,int num2blockDim_1_1527,float invYnumblockDim_1_1527)
{
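// Layout of this generated kernel: the x-y plane is tiled 16x16 through the
// _sh_block_Uold shared-memory tile (its one-cell edge halo is loaded by the
// first four thread rows), blockIdx.y packs both the y and z block indices
// (unpacked via blocksInY / invBlocksInY), and each thread marches through up
// to 16 consecutive z-planes, keeping the up/current/down Uold values in registers.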
#define TILE_X 16
#define TILE_Y 16
__device__ __shared__ double _sh_block_Uold[TILE_Y + 2][TILE_X + 2];
double *Unew = (double *)dev_2_Unew . ptr;
int _width = dev_2_Unew . pitch / sizeof(double );
int _slice = dev_2_Unew . ysize * _width;
double *Uold = (double *)dev_1_Uold . ptr;
float blocksInY = num2blockDim_1_1527;
float invBlocksInY = invYnumblockDim_1_1527;
int _p_x;
int _p_y;
int _p_z;
{
int _upperb_y = m;
int _upperb_x = n;
int _idx = threadIdx.x + 1;
int _gidx = _idx + blockDim.x * blockIdx.x;
int _idy = threadIdx.y + 1;
int _gidy = _idy + blockDim.y * 1 * blockIdx.y;
int _idz = threadIdx.z + 1;
int blockIdxz = blockIdx.y * invBlocksInY;
int blockIdxy = blockIdx.y - blockIdxz * blocksInY;
_gidy = _idy + blockIdxy * blockDim.y;
int _gidz = _idz + blockIdxz * 16;
int _index3D = _gidx + _gidy * _width + _gidz * _slice;
double up__rUold = Uold[_index3D - _slice];
double _rUold = Uold[_index3D];
_idz = 1;
_idy = threadIdx.y + 1;
_idx = threadIdx.x + 1;
int _borderIdx = _idx;
int _borderIdy = 0;
int _borderGlobalIndexDiff = 0;
_borderIdx = (threadIdx.y == 1?0 : _borderIdx);
_borderIdx = (threadIdx.y == 2?blockDim.x + 1 : _borderIdx);
_borderIdy = (threadIdx.y == 3?blockDim.y + 1 : _borderIdy);
_borderIdy = (threadIdx.y == 1 || threadIdx.y == 2?_idx : _borderIdy);
_borderGlobalIndexDiff = _borderIdx - _idx + _width * (_borderIdy - _idy);
{
int _upper_gidz = _gidz + 16 < k?_gidz + 15 : k;
{
if (_gidy >= 1 && _gidy <= m) {{
if (_gidx >= 1 && _gidx <= n)
for (_gidz = _gidz; _gidz <= _upper_gidz; _gidz += 1) {
_index3D = _gidx + _gidy * _width + _gidz * _slice;
{
_sh_block_Uold[_idy][_idx] = _rUold;
if (threadIdx.y < 4 * 1)
_sh_block_Uold[_borderIdy][_borderIdx] = Uold[_index3D + _borderGlobalIndexDiff];
double down__rUold = Uold[_index3D + _slice];
double _rUnew;
__syncthreads();
_rUnew = c0 * _sh_block_Uold[_idy][_idx] + c1 * (_sh_block_Uold[_idy][_idx - 1] + _sh_block_Uold[_idy][_idx + 1] + _sh_block_Uold[_idy - 1][_idx] + _sh_block_Uold[_idy + 1][_idx] + up__rUold + down__rUold);
Unew[_index3D] = _rUnew;
up__rUold = _rUold;
_rUold = down__rUold;
__syncthreads();
}
}
}
}
}
}
}
}
| cb34e109c5bd46315b19320943c4c6848efac029.cu | /*
by Didem Unat
3D 7-point jacobi
Written to be used as an input program to mint translator
See the alloc2D function, which allocates contiguous memory space to
the array.
*/
//#include "common.h"
#include <stdio.h>
#include <math.h>
//#include <omp.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/time.h>
#define REAL double
#define FLOPS 8
#define chunk 64
const double kMicro = 1.0e-6;
double ***alloc3D(int n,int m,int k)
{
double ***m_buffer = NULL;
// double ***m_buffer = ((void *)0);
int nx = n;
int ny = m;
int nk = k;
m_buffer = ((double ***)(malloc(sizeof(double **) * nk)));
m_buffer?((void )0) : ((__assert_fail("m_buffer","heat3D.c",32,__PRETTY_FUNCTION__) , ((void )0)));
double **m_tempzy = (double **)(malloc(sizeof(double *) * nk * ny));
double *m_tempzyx = (double *)(malloc(sizeof(double ) * nx * ny * nk));
int z;
int y;
for (z = 0; z < nk; (z++ , m_tempzy += ny)) {
m_buffer[z] = m_tempzy;
for (y = 0; y < ny; (y++ , m_tempzyx += nx)) {
m_buffer[z][y] = m_tempzyx;
}
}
return m_buffer;
}
double getTime()
{
struct timeval TV;
// const int RC = gettimeofday(&TV,((void *)0));
const int RC = gettimeofday(&TV, NULL);
if (RC == -1) {
printf("ERROR: Bad call to gettimeofday\n");
return (-1);
}
return ((double )TV . tv_sec) + kMicro * ((double )TV . tv_usec);
// end getTime()
}
//allocate 3D array
double ***alloc3D_(int n,int m,int k)
{
// double ***E = ((void *)0);
double ***E = NULL;
int nx = n;
int ny = m;
int nk = k;
E = ((double ***)(malloc(sizeof(double **) * nk)));
E?((void )0) : ((__assert_fail("E","heat3D.c",71,__PRETTY_FUNCTION__) , ((void )0)));
E[0] = ((double **)(malloc(sizeof(double *) * nk * ny)));
E[0][0] = ((double *)(malloc(sizeof(double ) * nx * ny * nk)));
int jj;
int kk;
for (kk = 0; kk < nk; kk++) {
if (kk > 0) {
E[kk] = E[kk - 1] + ny;
E[kk][0] = E[kk - 1][0] + ny * nx;
}
for (jj = 1; jj < ny; jj++) {
E[kk][jj] = E[kk][jj - 1] + nx;
}
}
return E;
}
void free3D(double ***E)
{
//int k=0;
/* for(k=0 ; k < m ; k++)
{
free(E[k]);
}*/
free(E[0][0]);
free(E[0]);
free(E);
}
void init(double ***E,int N,int M,int K)
{
int i;
int j;
int k;
for (k = 0; k < K; k++)
for (i = 0; i < M; i++)
for (j = 0; j < N; j++) {
E[k][i][j] = 1.0;
if (i == 0 || i == M - 1 || j == 0 || j == N - 1 || k == 0 || k == K - 1) {
E[k][i][j] = 0.0;
}
}
}
//calculate l2norm for comparison
void calculatel2Norm(double ***E,int N,int M,int K,int nIters)
{
int i;
int j;
int k = 0;
float mx = (-1);
float l2norm = 0;
for (k = 1; k <= K; k++) {
for (j = 1; j <= M; j++) {
for (i = 1; i <= N; i++) {
l2norm += E[k][j][i] * E[k][j][i];
if (E[k][j][i] > mx) {
mx = E[k][j][i];
}
}
}
}
l2norm /= ((float )(N * M * K));
l2norm = (sqrt(l2norm));
printf(":N %d M %d K %d , iteration %d\n",N,M,K,nIters);
printf(":max: %20.12e, l2norm: %20.12e\n",mx,l2norm);
}
__global__ static void mint_1_1527(int n,int m,int k,double c0,double c1,cudaPitchedPtr dev_2_Unew,cudaPitchedPtr dev_1_Uold,int num2blockDim_1_1527,float invYnumblockDim_1_1527);
int main(int argc,char *argv[])
{
int n = 256;
int m = 256;
int k = 256;
double c0 = 0.5;
double c1 = -0.25;
double ***Unew;
double ***Uold;
Unew = alloc3D(n + 2,m + 2,k + 2);
Uold = alloc3D(n + 2,m + 2,k + 2);
init(Unew,n + 2,m + 2,k + 2);
init(Uold,n + 2,m + 2,k + 2);
int T = 20;
printf("\n=====Timings (sec) for 7-Point Jacobi, Solving Heat Eqn ");
if (sizeof(double ) == 4) {
printf(" (Single Precision) =====\n");
}
if (sizeof(double ) == 8) {
printf(" (Double Precision) =====\n");
}
printf("Kernel\t Time(sec)\tGflops \tBW-ideal(GB/s)\tBW-algorithm (N=(%d,%d) iters=%d)\n",n,n,T);
printf("------\t----------\t--------\t--------------\t------------\n");
int nIters = 0;
double time_elapsed;
double Gflops = 0.0;
/* Mint: Replaced Pragma: #pragma mint copy( Uold, toDevice,( n+2 ),( m+2 ),( k+2 ) ) */
cudaError_t stat_dev_1_Uold;
cudaExtent ext_dev_1_Uold = make_cudaExtent(((n+2)) * sizeof(double ),((m+2)),((k+2)));
/* Mint: Malloc on the device */
cudaPitchedPtr dev_1_Uold;
stat_dev_1_Uold = cudaMalloc3D(&dev_1_Uold,ext_dev_1_Uold);
if (stat_dev_1_Uold != cudaSuccess)
fprintf(stderr,"%s\n",cudaGetErrorString(stat_dev_1_Uold));
/* Mint: Copy host to device */
cudaMemcpy3DParms param_1_dev_1_Uold = {0};
param_1_dev_1_Uold . srcPtr = make_cudaPitchedPtr(((void *)Uold[0][0]),((n+2)) * sizeof(double ),((n+2)),((m+2)));
param_1_dev_1_Uold . dstPtr = dev_1_Uold;
param_1_dev_1_Uold . extent = ext_dev_1_Uold;
param_1_dev_1_Uold . kind = cudaMemcpyHostToDevice;
stat_dev_1_Uold = cudaMemcpy3D(¶m_1_dev_1_Uold);
if (stat_dev_1_Uold != cudaSuccess)
fprintf(stderr,"%s\n",cudaGetErrorString(stat_dev_1_Uold));
/* Mint: Replaced Pragma: #pragma mint copy( Unew, toDevice,( n+2 ), m+2,( k+2 ) ) */
cudaError_t stat_dev_2_Unew;
cudaExtent ext_dev_2_Unew = make_cudaExtent(((n+2)) * sizeof(double ),(m+2),((k+2)));
/* Mint: Malloc on the device */
cudaPitchedPtr dev_2_Unew;
stat_dev_2_Unew = cudaMalloc3D(&dev_2_Unew,ext_dev_2_Unew);
if (stat_dev_2_Unew != cudaSuccess)
fprintf(stderr,"%s\n",cudaGetErrorString(stat_dev_2_Unew));
/* Mint: Copy host to device */
cudaMemcpy3DParms param_2_dev_2_Unew = {0};
param_2_dev_2_Unew . srcPtr = make_cudaPitchedPtr(((void *)Unew[0][0]),((n+2)) * sizeof(double ),((n+2)),(m+2));
param_2_dev_2_Unew . dstPtr = dev_2_Unew;
param_2_dev_2_Unew . extent = ext_dev_2_Unew;
param_2_dev_2_Unew . kind = cudaMemcpyHostToDevice;
stat_dev_2_Unew = cudaMemcpy3D(¶m_2_dev_2_Unew);
if (stat_dev_2_Unew != cudaSuccess)
fprintf(stderr,"%s\n",cudaGetErrorString(stat_dev_2_Unew));
{
time_elapsed = getTime();
int t = 0;
while(t < T){
t++;
int x;
int y;
int z;
//7-point stencil
#pragma mint for nest(all) tile(16,16,16) chunksize(1,1,16)
int num3blockDim_1_1527 = (k - 1 + 1) % 16 == 0?(k - 1 + 1) / 16 : (k - 1 + 1) / 16 + 1;
int num2blockDim_1_1527 = (m - 1 + 1) % 16 == 0?(m - 1 + 1) / 16 : (m - 1 + 1) / 16 + 1;
int num1blockDim_1_1527 = (n - 1 + 1) % 16 == 0?(n - 1 + 1) / 16 : (n - 1 + 1) / 16 + 1;
float invYnumblockDim_1_1527 = 1.00000F / num2blockDim_1_1527;
dim3 blockDim_1_1527(16,16,1);
dim3 gridDim_1_1527(num1blockDim_1_1527,num2blockDim_1_1527*num3blockDim_1_1527);
mint_1_1527<<<gridDim_1_1527,blockDim_1_1527>>>(n,m,k,c0,c1,dev_2_Unew,dev_1_Uold,num2blockDim_1_1527,invYnumblockDim_1_1527);
cudaThreadSynchronize();
cudaError_t err_mint_1_1527 = cudaGetLastError();
if (err_mint_1_1527) {
fprintf(stderr,"In %s, %s\n","mint_1_1527",cudaGetErrorString(err_mint_1_1527));
}
#pragma mint single
{
double ***tmp;
void *dev_tmp;
dev_tmp = dev_1_Uold . ptr;
dev_1_Uold . ptr = dev_2_Unew . ptr;
dev_2_Unew . ptr = dev_tmp;
nIters = t;
}
//end of while
}
//end of parallel region
}
cudaFree(dev_2_Unew . ptr);
cudaFree(dev_1_Uold . ptr);
#pragma mint copy(Uold, fromDevice, (n+2), (m+2), (k+2))
time_elapsed = getTime() - time_elapsed;
Gflops = ((double )((nIters * n * m * k) * 1.0e-9 * 8)) / time_elapsed;
printf("%s%3.3f \t%5.3f\n","Heat3D ",time_elapsed,Gflops);
calculatel2Norm(Uold,n,m,k,T);
free3D(Uold);
free3D(Unew);
return 0;
}
__global__ static void mint_1_1527(int n,int m,int k,double c0,double c1,cudaPitchedPtr dev_2_Unew,cudaPitchedPtr dev_1_Uold,int num2blockDim_1_1527,float invYnumblockDim_1_1527)
{
#define TILE_X 16
#define TILE_Y 16
__device__ __shared__ double _sh_block_Uold[TILE_Y + 2][TILE_X + 2];
double *Unew = (double *)dev_2_Unew . ptr;
int _width = dev_2_Unew . pitch / sizeof(double );
int _slice = dev_2_Unew . ysize * _width;
double *Uold = (double *)dev_1_Uold . ptr;
float blocksInY = num2blockDim_1_1527;
float invBlocksInY = invYnumblockDim_1_1527;
int _p_x;
int _p_y;
int _p_z;
{
int _upperb_y = m;
int _upperb_x = n;
int _idx = threadIdx.x + 1;
int _gidx = _idx + blockDim.x * blockIdx.x;
int _idy = threadIdx.y + 1;
int _gidy = _idy + blockDim.y * 1 * blockIdx.y;
int _idz = threadIdx.z + 1;
int blockIdxz = blockIdx.y * invBlocksInY;
int blockIdxy = blockIdx.y - blockIdxz * blocksInY;
_gidy = _idy + blockIdxy * blockDim.y;
int _gidz = _idz + blockIdxz * 16;
int _index3D = _gidx + _gidy * _width + _gidz * _slice;
double up__rUold = Uold[_index3D - _slice];
double _rUold = Uold[_index3D];
_idz = 1;
_idy = threadIdx.y + 1;
_idx = threadIdx.x + 1;
int _borderIdx = _idx;
int _borderIdy = 0;
int _borderGlobalIndexDiff = 0;
_borderIdx = (threadIdx.y == 1?0 : _borderIdx);
_borderIdx = (threadIdx.y == 2?blockDim.x + 1 : _borderIdx);
_borderIdy = (threadIdx.y == 3?blockDim.y + 1 : _borderIdy);
_borderIdy = (threadIdx.y == 1 || threadIdx.y == 2?_idx : _borderIdy);
_borderGlobalIndexDiff = _borderIdx - _idx + _width * (_borderIdy - _idy);
{
int _upper_gidz = _gidz + 16 < k?_gidz + 15 : k;
{
if (_gidy >= 1 && _gidy <= m) {{
if (_gidx >= 1 && _gidx <= n)
for (_gidz = _gidz; _gidz <= _upper_gidz; _gidz += 1) {
_index3D = _gidx + _gidy * _width + _gidz * _slice;
{
_sh_block_Uold[_idy][_idx] = _rUold;
if (threadIdx.y < 4 * 1)
_sh_block_Uold[_borderIdy][_borderIdx] = Uold[_index3D + _borderGlobalIndexDiff];
double down__rUold = Uold[_index3D + _slice];
double _rUnew;
__syncthreads();
_rUnew = c0 * _sh_block_Uold[_idy][_idx] + c1 * (_sh_block_Uold[_idy][_idx - 1] + _sh_block_Uold[_idy][_idx + 1] + _sh_block_Uold[_idy - 1][_idx] + _sh_block_Uold[_idy + 1][_idx] + up__rUold + down__rUold);
Unew[_index3D] = _rUnew;
up__rUold = _rUold;
_rUold = down__rUold;
__syncthreads();
}
}
}
}
}
}
}
}
|
e00331e4ef978d3370696cc62a5c61d5cb786f34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "im2col.hpp"
#define SHMEM_SIZE 32
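// Note: the tiles in matrixMul are indexed as s[threadIdx.y * blockDim.x + threadIdx.x],
// so SHMEM_SIZE needs to be at least blockDim.x * blockDim.y floats; the launch in
// im2col_gemm_gpu uses CUDA_NUM_THREADS x CUDA_NUM_THREADS blocks, which is assumed
// here to satisfy that bound.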
__global__ void matrixMul(const float *A,const float *B, float *C, int m, int n, int k, int batch_size) {
// Compute each thread's global row and column index
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Statically allocated shared memory
__shared__ float s_a[SHMEM_SIZE];
__shared__ float s_b[SHMEM_SIZE];
for(int t = 0;t < batch_size; t++) {
// Accumulate in temporary variable
float tmp = 0;
// Sweep tile across matrix
for (int i = 0; i < k; i += blockDim.x) {
// Load in elements for this tile
s_a[threadIdx.y * blockDim.x + threadIdx.x] = A[row * k + i + threadIdx.x + t*m*k];
s_b[threadIdx.y * blockDim.x + threadIdx.x] = B[i * n + threadIdx.y * n + col];
// Wait for both tiles to be loaded in before doing computation
__syncthreads();
// Do matrix multiplication on the small matrix
for (int j = 0; j < blockDim.x; j++) {
tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
}
// Wait for all threads to finish using current tiles before loading in new
// ones
__syncthreads();
}
// Write back results
C[row * n + col + t*m*n] = tmp;
}
}
// converts a batch of images of shape: data_im: batch x ic x ih x iw (ic: input_channels in image)
// to 2D col of shape: data_col: batch x (ic * kh * kw) x (hcol * wcol)
// filter size: kh x kw
// kernel multiplication patches: hcol x wcol (Based on input size, kernel size, padding, stride)
// Each thread writes one kernel multiplication patch (kh x kw) in data_col
// n is the number of tasks (here: ic * hcol * wcol, ie number of kernel patches per image)
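// Worked example (values assumed purely for illustration): with ih = iw = 4,
// ic = 3, kh = kw = 3, pad = 1, stride = 1 we get hcol = wcol = (4 + 2*1 - 3)/1 + 1 = 4,
// so one image's col block is (ic*kh*kw) x (hcol*wcol) = 27 x 16, and each of the
// ic*hcol*wcol = 48 threads assigned to that image fills the kh*kw = 9 entries of
// its channel's rows in a single column.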
__global__ void im2col_kernel(const float * data_im, float * data_col, const int n,
const int kh, const int kw, const int pad, const int stride,
const int ih, const int iw, const int ic,
const int hcol, const int wcol)
{
// essentially this loop could have run batch-size number of times,
// but since we are launching enough threads to handle each image separately, it executes just once;
// here it mainly prevents any extra threads we launch from accessing out-of-bounds memory
CUDA_KERNEL_LOOP(index, n)
{
// figure out which part of which image you will work on
int imidx = blockIdx.y;
int w_out = index % wcol;
index /= wcol;
int h_out = index % hcol;
int channel_in = index / hcol;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
// this thread will write the output patch (kh x kw) at location (imidx, channel_out, h_out, w_out)
// that patch is based on the image patch at (imidx, channel_in, h_in, w_in)
// i.e. will do the work for patch centred at (channel_in, h_in, w_in) in image imidx
data_im += ((imidx * ic + channel_in) * ih + h_in) * iw + w_in;
data_col += ((imidx * ic + channel_in) * kh * kw * hcol + h_out) * wcol + w_out;
#pragma unroll
for (int i = 0; i < kh; ++i) {
for (int j = 0; j < kw; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col = (h >= 0 && w >= 0 && h < ih && w < iw) ?
data_im[i * iw + j]: 0;
data_col += hcol * wcol;
}
}
}
}
// takes a batch of images on GPU: bs x ic x ih x iw (ic: input channels, bs: batch size)
// and the kernels on GPU: oc x ic x kh x kw (oc: output channels)
// does the convolution based on padding (pad) and stride
// data_col is used for intermediate col form storage
// output is returned in data_out
void im2col_gemm_gpu(const float * data_im, const float * data_ker,
const int kh, const int kw, const int pad, const int stride,
const int ih, const int iw, const int ic, const int oc,
float * data_col, float * data_out, int bs)
{
//hipDeviceProp_t devp;
//hipGetDeviceProperties(&devp, 0);
//unsigned int SHMEM_SIZE = devp.sharedMemPerBlock/(2*sizeof(float));
//printf("Total shared memory per block:%u\n", SHMEM_SIZE);
// Step 1: convert the image to col form
// dimensions of the col form corresponding to this image
int hcol = (ih + 2 * pad - kh) / stride + 1;
int wcol = (iw + 2 * pad - kw) / stride + 1;
// We are going to launch bs groups of ic * hcol * wcol kernels threads for im2col,
// each thread is responsible for copying a single-channel kernel multiplication patch
// i.e. one thread per output pixel in the output of conv
// So, all images in batch are converted to col form parallely
int op_size = ic * hcol * wcol;
dim3 blocks(GET_BLOCKS(op_size), bs, 1);
dim3 threads(CUDA_NUM_THREADS, 1, 1);
hipLaunchKernelGGL(( im2col_kernel), dim3(blocks), dim3(threads), 0, 0, data_im, data_col, op_size, kh, kw, pad, stride, ih, iw, ic, hcol, wcol);
//CUDA_POST_KERNEL_CHECK; // check if there was any error
// now, the col form shall be multiplied with the kernels laid out straight i.e. (ic * kh * kw)
// so, since oc is the number of kernels, we get:
// "2D kernel matrix" oc x (ic * kh * kw)
// and the "2D col matrix" for one image is: (ic * kh * kw) x (hcol * wcol)
// and you see that magically, their multiplication output is:
// output: oc x (hcol * wcol), i.e. oc x hcol x wcol, exactly the shape the next convolution's im2col expects
// so, there is no need to ever work things back (col2im) or reshape either
// in summary, we do matmul(kernel, im2col(im_input)) -> conv_output (in "correct" form)
// Step 2: GEMM using libcublas
// get params ready for GEMM call
// Performs C + i*strideC = α op(A + i*strideA) op(B + i*strideB) + β(C + i* strideC)
// for i ∈ [0, batchSize − 1]
// Thus, this one call will parallely do the matrix multiplication for all images in the batch
// Since we are doing A * B, we need α = 1, β = 0
// Since we don't need any transpose, op = HIPBLAS_OP_N
const float alpha = 1.0f;
const float beta = 0.0f;
int ldA, ldB, ldC;
int m = ldA = ldC = hcol * wcol;
int n = oc;
int k = ldB = ic * kh * kw;
long long int strideA = m * k; // size of each col form
long long int strideB = 0; // reusing the same kernel matrix for each image
long long int strideC = m * n; // size of output feature map
// CUDA sees matrices as column major
// So, a matrix we see as HxW, it would see as WxH in the same memory layout
// So, matA (our view) -> matA' (CUDA view)
// Thus, to do matA * matB in our view, we shall run CUDA for matB * matA.
// Output would be matB' * matA' (CUDA view) = (matA * matB)' (CUDA view) = matA * matB (our view)
// In essence, trust me when I do col * kernel to achieve kernel * col
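// (The identity relied on above: a row-major buffer reinterpreted as column-major
// is the transpose of the same matrix, and (A*B)^T = B^T * A^T, so asking a
// column-major routine for matB * matA leaves matA * matB in the row-major view
// of the output buffer.)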
// Blocks per grid dimension (assumes CUDA_NUM_THREADS divides m and n evenly)
int BLOCKS_X = n / CUDA_NUM_THREADS;
int BLOCKS_Y = m / CUDA_NUM_THREADS;
// Use dim3 structs for block and grid dimensions
dim3 threads2(CUDA_NUM_THREADS, CUDA_NUM_THREADS);
dim3 blocks2(BLOCKS_X, BLOCKS_Y);
// Launch kernel
hipLaunchKernelGGL(( matrixMul), dim3(blocks2), dim3(threads2), 0, 0, data_col, data_ker, data_out, m, n, k, bs);
}
// takes a batch of images on CPU: data_im: batch x ic x ih x iw (ic: input channels)
// and the kernels on CPU: data_ker: oc x ic x kh x kw (oc: output channels)
// does the convolution based on padding (pad) and stride
// returns the convolution output on CPU
// conv_time & overhead_time are used for kernel timing
float * im2colWithCuda(const float * data_im, const float * data_ker, const int batch,
const int kh, const int kw, const int pad, const int stride,
const int ih, const int iw, const int ic, const int oc,
float& conv_time, float& overhead_time)
{
// Timing variables - CUDA Event API
overhead_time = 0;
conv_time = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// image dim
ssize_t image_size = ic * ih * iw;
ssize_t images_size = batch * image_size;
// kernel dim
ssize_t k = ic * kh * kw;
ssize_t kernels_size = oc * k;
// col dim
ssize_t hcol = (ih + 2 * pad - kh) / stride + 1;
ssize_t wcol = (iw + 2 * pad - kw) / stride + 1;
ssize_t one_col = ic * kh * kw * hcol * wcol;
ssize_t col_batch = batch * one_col;
// output dim
ssize_t output_feature = oc * hcol * wcol;
ssize_t result_size = batch * output_feature;
// move images to GPU
float * dev_image = nullptr;
CUDA_CHECK(hipMalloc((void**)&dev_image, images_size * sizeof(float)));
CUDA_CHECK(hipMemcpy(dev_image, data_im, images_size * sizeof(float), hipMemcpyHostToDevice));
// move kernels to GPU
float * dev_kernel = nullptr;
CUDA_CHECK(hipMalloc((void**)&dev_kernel, kernels_size * sizeof(float)));
CUDA_CHECK(hipMemcpy(dev_kernel, data_ker, kernels_size * sizeof(float), hipMemcpyHostToDevice));
// allocate GPU memory for intermediate col form
float * dev_col = nullptr;
CUDA_CHECK(hipMalloc((void**)&dev_col, col_batch * sizeof(float)));
// allocate GPU memory for convolution result
float * dev_ret = nullptr;
CUDA_CHECK(hipMalloc((void**)&dev_ret, result_size * sizeof(float)));
// Record the kernel run time
hipEventRecord(start);
// Kernel launch - this single call will handle all the images in the batch parallely
im2col_gemm_gpu(dev_image, dev_kernel, kh, kw, pad, stride, ih, iw, ic, oc, dev_col, dev_ret, batch);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&conv_time, start, stop);
// Check for any errors launching the kernel
//CUDA_POST_KERNEL_CHECK;
// Copy output vector from GPU to host memory.
float * data_ret = (float *)malloc(result_size * sizeof(float));
CUDA_CHECK(hipMemcpy(data_ret, dev_ret, result_size * sizeof(float), hipMemcpyDeviceToHost));
// Free CUDA memory
hipFree(dev_image);
hipFree(dev_col);
hipFree(dev_kernel);
hipFree(dev_ret);
// Free timing resources
hipEventDestroy(start);
hipEventDestroy(stop);
return data_ret;
}
// The exposed library function which just calls im2colWithCuda the right way
float* IM2COL::forward(int out_size, int channel, int kernel_height, int kernel_width, int pad,
int stride, float* kernel, int batch_size, int input_height, int input_width, float* input,
float& conv_time, float& overhead_time)
{
return im2colWithCuda(input, kernel, batch_size, kernel_height, kernel_width,
pad, stride, input_height, input_width, channel, out_size, conv_time, overhead_time);
}
| e00331e4ef978d3370696cc62a5c61d5cb786f34.cu | #include "im2col.hpp"
#define SHMEM_SIZE 32
__global__ void matrixMul(const float *A,const float *B, float *C, int m, int n, int k, int batch_size) {
// Compute each thread's global row and column index
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Statically allocated shared memory
__shared__ float s_a[SHMEM_SIZE];
__shared__ float s_b[SHMEM_SIZE];
for(int t = 0;t < batch_size; t++) {
// Accumulate in temporary variable
float tmp = 0;
// Sweep tile across matrix
for (int i = 0; i < k; i += blockDim.x) {
// Load in elements for this tile
s_a[threadIdx.y * blockDim.x + threadIdx.x] = A[row * k + i + threadIdx.x + t*m*k];
s_b[threadIdx.y * blockDim.x + threadIdx.x] = B[i * n + threadIdx.y * n + col];
// Wait for both tiles to be loaded in before doing computation
__syncthreads();
// Do matrix multiplication on the small matrix
for (int j = 0; j < blockDim.x; j++) {
tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
}
// Wait for all threads to finish using current tiles before loading in new
// ones
__syncthreads();
}
// Write back results
C[row * n + col + t*m*n] = tmp;
}
}
// converts a batch of images of shape: data_im: batch x ic x ih x iw (ic: input_channels in image)
// to 2D col of shape: data_col: batch x (ic * kh * kw) x (hcol * wcol)
// filter size: kh x kw
// kernel multiplication patches: hcol x wcol (Based on input size, kernel size, padding, stride)
// Each thread writes one kernel multiplication patch (kh x kw) in data_col
// n is the number of tasks (here: ic * hcol * wcol, ie number of kernel patches per image)
__global__ void im2col_kernel(const float * data_im, float * data_col, const int n,
const int kh, const int kw, const int pad, const int stride,
const int ih, const int iw, const int ic,
const int hcol, const int wcol)
{
// essentially this loop could have run batch-size number of times,
// but since we are launching enough threads to handle each image separately, it executes just once;
// here it mainly prevents any extra threads we launch from accessing out-of-bounds memory
CUDA_KERNEL_LOOP(index, n)
{
// figure out which part of which image you will work on
int imidx = blockIdx.y;
int w_out = index % wcol;
index /= wcol;
int h_out = index % hcol;
int channel_in = index / hcol;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
// this thread will write the output patch (kh x kw) at location (imidx, channel_out, h_out, w_out)
// that patch is based on the image patch at (imidx, channel_in, h_in, w_in)
// i.e. will do the work for patch centred at (channel_in, h_in, w_in) in image imidx
data_im += ((imidx * ic + channel_in) * ih + h_in) * iw + w_in;
data_col += ((imidx * ic + channel_in) * kh * kw * hcol + h_out) * wcol + w_out;
#pragma unroll
for (int i = 0; i < kh; ++i) {
for (int j = 0; j < kw; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col = (h >= 0 && w >= 0 && h < ih && w < iw) ?
data_im[i * iw + j]: 0;
data_col += hcol * wcol;
}
}
}
}
// takes a batch of images on GPU: bs x ic x ih x iw (ic: input channels, bs: batch size)
// and the kernels on GPU: oc x ic x kh x kw (oc: output channels)
// does the convolution based on padding (pad) and stride
// data_col is used for intermediate col form storage
// output is returned in data_out
void im2col_gemm_gpu(const float * data_im, const float * data_ker,
const int kh, const int kw, const int pad, const int stride,
const int ih, const int iw, const int ic, const int oc,
float * data_col, float * data_out, int bs)
{
//cudaDeviceProp devp;
//cudaGetDeviceProperties(&devp, 0);
//unsigned int SHMEM_SIZE = devp.sharedMemPerBlock/(2*sizeof(float));
//printf("Total shared memory per block:%u\n", SHMEM_SIZE);
// Step 1: convert the image to col form
// dimensions of the col form corresponding to this image
int hcol = (ih + 2 * pad - kh) / stride + 1;
int wcol = (iw + 2 * pad - kw) / stride + 1;
// We are going to launch bs groups of ic * hcol * wcol kernels threads for im2col,
// each thread is responsible for copying a single-channel kernel multiplication patch
// i.e. one thread per output pixel in the output of conv
// So, all images in batch are converted to col form parallely
int op_size = ic * hcol * wcol;
dim3 blocks(GET_BLOCKS(op_size), bs, 1);
dim3 threads(CUDA_NUM_THREADS, 1, 1);
im2col_kernel<<<blocks, threads>>>(data_im, data_col, op_size, kh, kw, pad, stride, ih, iw, ic, hcol, wcol);
//CUDA_POST_KERNEL_CHECK; // check if there was any error
// now, the col form shall be multiplied with the kernels laid out straight i.e. (ic * kh * kw)
// so, since oc is the number of kernels, we get:
// "2D kernel matrix" oc x (ic * kh * kw)
// and the "2D col matrix" for one image is: (ic * kh * kw) x (hcol * wcol)
// and you see that magically, their multiplication output is:
// output: oc x (hcol * wcol), i.e. oc x hcol x wcol, exactly the shape the next convolution's im2col expects
// so, there is no need to ever work things back (col2im) or reshape either
// in summary, we do matmul(kernel, im2col(im_input)) -> conv_output (in "correct" form)
// Step 2: GEMM using libcublas
// get params ready for GEMM call
// Performs C + i*strideC = α op(A + i*strideA) op(B + i*strideB) + β(C + i* strideC)
// for i ∈ [0, batchSize − 1]
// Thus, this one call will parallely do the matrix multiplication for all images in the batch
// Since we are doing A * B, we need α = 1, β = 0
// Since we don't need any transpose, op = CUBLAS_OP_N
const float alpha = 1.0f;
const float beta = 0.0f;
int ldA, ldB, ldC;
int m = ldA = ldC = hcol * wcol;
int n = oc;
int k = ldB = ic * kh * kw;
long long int strideA = m * k; // size of each col form
long long int strideB = 0; // reusing the same kernel matrix for each image
long long int strideC = m * n; // size of output feature map
// CUDA sees matrices as column major
// So, a matrix we see as HxW, it would see as WxH in the same memory layout
// So, matA (our view) -> matA' (CUDA view)
// Thus, to do matA * matB in our view, we shall run CUDA for matB * matA.
// Output would be matB' * matA' (CUDA view) = (matA * matB)' (CUDA view) = matA * matB (our view)
// In essence, trust me when I do col * kernel to achieve kernel * col
// Blocks per grid dimension (assumes CUDA_NUM_THREADS divides m and n evenly)
int BLOCKS_X = n / CUDA_NUM_THREADS;
int BLOCKS_Y = m / CUDA_NUM_THREADS;
// Use dim3 structs for block and grid dimensions
dim3 threads2(CUDA_NUM_THREADS, CUDA_NUM_THREADS);
dim3 blocks2(BLOCKS_X, BLOCKS_Y);
// Launch kernel
matrixMul<<<blocks2, threads2>>>(data_col, data_ker, data_out, m, n, k, bs);
}
// takes a batch of images on CPU: data_im: batch x ic x ih x iw (ic: input channels)
// and the kernels on CPU: data_ker: oc x ic x kh x kw (oc: output channels)
// does the convolution based on padding (pad) and stride
// returns the convolution output on CPU
// conv_time & overhead_time are used for kernel timing
float * im2colWithCuda(const float * data_im, const float * data_ker, const int batch,
const int kh, const int kw, const int pad, const int stride,
const int ih, const int iw, const int ic, const int oc,
float& conv_time, float& overhead_time)
{
// Timing variables - CUDA Event API
overhead_time = 0;
conv_time = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// image dim
ssize_t image_size = ic * ih * iw;
ssize_t images_size = batch * image_size;
// kernel dim
ssize_t k = ic * kh * kw;
ssize_t kernels_size = oc * k;
// col dim
ssize_t hcol = (ih + 2 * pad - kh) / stride + 1;
ssize_t wcol = (iw + 2 * pad - kw) / stride + 1;
ssize_t one_col = ic * kh * kw * hcol * wcol;
ssize_t col_batch = batch * one_col;
// output dim
ssize_t output_feature = oc * hcol * wcol;
ssize_t result_size = batch * output_feature;
// move images to GPU
float * dev_image = nullptr;
CUDA_CHECK(cudaMalloc((void**)&dev_image, images_size * sizeof(float)));
CUDA_CHECK(cudaMemcpy(dev_image, data_im, images_size * sizeof(float), cudaMemcpyHostToDevice));
// move kernels to GPU
float * dev_kernel = nullptr;
CUDA_CHECK(cudaMalloc((void**)&dev_kernel, kernels_size * sizeof(float)));
CUDA_CHECK(cudaMemcpy(dev_kernel, data_ker, kernels_size * sizeof(float), cudaMemcpyHostToDevice));
// allocate GPU memory for intermediate col form
float * dev_col = nullptr;
CUDA_CHECK(cudaMalloc((void**)&dev_col, col_batch * sizeof(float)));
// allocate GPU memory for convolution result
float * dev_ret = nullptr;
CUDA_CHECK(cudaMalloc((void**)&dev_ret, result_size * sizeof(float)));
// Record the kernel run time
cudaEventRecord(start);
// Kernel launch - this single call will handle all the images in the batch parallely
im2col_gemm_gpu(dev_image, dev_kernel, kh, kw, pad, stride, ih, iw, ic, oc, dev_col, dev_ret, batch);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&conv_time, start, stop);
// Check for any errors launching the kernel
//CUDA_POST_KERNEL_CHECK;
// Copy output vector from GPU to host memory.
float * data_ret = (float *)malloc(result_size * sizeof(float));
CUDA_CHECK(cudaMemcpy(data_ret, dev_ret, result_size * sizeof(float), cudaMemcpyDeviceToHost));
// Free CUDA memory
cudaFree(dev_image);
cudaFree(dev_col);
cudaFree(dev_kernel);
cudaFree(dev_ret);
// Free timing resources
cudaEventDestroy(start);
cudaEventDestroy(stop);
return data_ret;
}
// The exposed library function which just calls im2colWithCuda the right way
float* IM2COL::forward(int out_size, int channel, int kernel_height, int kernel_width, int pad,
int stride, float* kernel, int batch_size, int input_height, int input_width, float* input,
float& conv_time, float& overhead_time)
{
return im2colWithCuda(input, kernel, batch_size, kernel_height, kernel_width,
pad, stride, input_height, input_width, channel, out_size, conv_time, overhead_time);
}
|
3fdf3b3aa81cb6cd53a01d9947a36cbe1c11d16a.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/TensorIterator.h>
#include <ATen/native/quantized/affine_quantizer.h>
#include <math.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at {
namespace native {
namespace {
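// The kernels below implement per-tensor affine (de)quantization:
//   q = clamp(nearbyint(r / scale) + zero_point, qmin, qmax)
//   r = (q - zero_point) * scale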
void quantize_tensor_per_tensor_affine_cuda(
const Tensor& rtensor,
Tensor& qtensor,
double scale,
int64_t zero_point) {
AT_DISPATCH_QINT_TYPES(
qtensor.scalar_type(), "quantize_tensor_per_tensor_affine_cuda", [&]() {
constexpr int64_t qmin = std::numeric_limits<underlying_t>::min();
constexpr int64_t qmax = std::numeric_limits<underlying_t>::max();
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(qtensor)
.add_input(rtensor)
.add_input(qtensor)
.build();
gpu_kernel(
iter,
[=] GPU_LAMBDA(float raw_val, scalar_t quantized_val) -> scalar_t {
int64_t qvalue =
static_cast<int64_t>(nearbyint(raw_val / scale) + zero_point);
qvalue = std::max<int64_t>(qvalue, qmin);
qvalue = std::min<int64_t>(qvalue, qmax);
quantized_val.val_ = qvalue;
return quantized_val;
});
});
}
void dequantize_tensor_per_tensor_affine_cuda(
const Tensor& qtensor,
Tensor& rtensor,
double scale,
int64_t zero_point) {
AT_DISPATCH_QINT_TYPES(
qtensor.scalar_type(), "dequantize_tensor_per_tensor_affine_cuda", [&]() {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(rtensor)
.add_input(qtensor)
.build();
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t value) -> float {
return (static_cast<float>(value.val_) - zero_point) * scale;
});
});
}
} // anonymous namespace
REGISTER_DISPATCH(
quantize_tensor_per_tensor_affine_stub,
&quantize_tensor_per_tensor_affine_cuda);
REGISTER_DISPATCH(
dequantize_tensor_per_tensor_affine_stub,
&dequantize_tensor_per_tensor_affine_cuda);
} // namespace native
} // namespace at
| 3fdf3b3aa81cb6cd53a01d9947a36cbe1c11d16a.cu | #include <ATen/native/TensorIterator.h>
#include <ATen/native/quantized/affine_quantizer.h>
#include <math.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at {
namespace native {
namespace {
void quantize_tensor_per_tensor_affine_cuda(
const Tensor& rtensor,
Tensor& qtensor,
double scale,
int64_t zero_point) {
AT_DISPATCH_QINT_TYPES(
qtensor.scalar_type(), "quantize_tensor_per_tensor_affine_cuda", [&]() {
constexpr int64_t qmin = std::numeric_limits<underlying_t>::min();
constexpr int64_t qmax = std::numeric_limits<underlying_t>::max();
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(qtensor)
.add_input(rtensor)
.add_input(qtensor)
.build();
gpu_kernel(
iter,
[=] GPU_LAMBDA(float raw_val, scalar_t quantized_val) -> scalar_t {
int64_t qvalue =
static_cast<int64_t>(nearbyint(raw_val / scale) + zero_point);
qvalue = std::max<int64_t>(qvalue, qmin);
qvalue = std::min<int64_t>(qvalue, qmax);
quantized_val.val_ = qvalue;
return quantized_val;
});
});
}
void dequantize_tensor_per_tensor_affine_cuda(
const Tensor& qtensor,
Tensor& rtensor,
double scale,
int64_t zero_point) {
AT_DISPATCH_QINT_TYPES(
qtensor.scalar_type(), "dequantize_tensor_per_tensor_affine_cuda", [&]() {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(rtensor)
.add_input(qtensor)
.build();
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t value) -> float {
return (static_cast<float>(value.val_) - zero_point) * scale;
});
});
}
} // anonymous namespace
REGISTER_DISPATCH(
quantize_tensor_per_tensor_affine_stub,
&quantize_tensor_per_tensor_affine_cuda);
REGISTER_DISPATCH(
dequantize_tensor_per_tensor_affine_stub,
&dequantize_tensor_per_tensor_affine_cuda);
} // namespace native
} // namespace at
|
2e16c37ab410b57a9b0cf61de9e4c4e3126b4196.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
 * to ensure data reuse; the matrix multiplication is done using a tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A;
checkCudaErrors(hipHostMalloc(&h_A, mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B;
checkCudaErrors(hipHostMalloc(&h_B, mem_size_B));
hipStream_t stream;
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C;
checkCudaErrors(hipHostMalloc(&h_C, mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
// copy host memory to device
checkCudaErrors(hipMemcpyAsync(d_A, h_A, mem_size_A, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(d_B, h_B, mem_size_B, hipMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
checkCudaErrors(hipStreamSynchronize(stream));
// Record the start event
checkCudaErrors(hipEventRecord(start, stream));
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads), 0, stream, d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(hipMemcpyAsync(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
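// every element of A is 1.0f and every element of B is valB, so each element of C should equal dimsA.x * valB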
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
checkCudaErrors(hipHostFree(h_A));
checkCudaErrors(hipHostFree(h_B));
checkCudaErrors(hipHostFree(h_C));
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| 2e16c37ab410b57a9b0cf61de9e4c4e3126b4196.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse; the matrix multiplication is done using a tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A;
checkCudaErrors(cudaMallocHost(&h_A, mem_size_A));
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B;
checkCudaErrors(cudaMallocHost(&h_B, mem_size_B));
cudaStream_t stream;
// Initialize host memory
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C;
checkCudaErrors(cudaMallocHost(&h_C, mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
// copy host memory to device
checkCudaErrors(cudaMemcpyAsync(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice, stream));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
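// note: this integer division assumes dimsB.x and dimsA.y are exact multiples of block_size
// (true for the defaults set in main()); other sizes would need the grid rounded up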
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
MatrixMulCUDA<16> <<< grid, threads, 0, stream>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<< grid, threads, 0, stream>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
checkCudaErrors(cudaStreamSynchronize(stream));
// Record the start event
checkCudaErrors(cudaEventRecord(start, stream));
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
MatrixMulCUDA<16> <<<grid, threads, 0, stream>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<<grid, threads, 0, stream>>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, stream));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
checkCudaErrors(cudaMemcpyAsync(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
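// every element of A is 1.0f and every element of B is valB, so each element of C should equal dimsA.x * valB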
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
checkCudaErrors(cudaFreeHost(h_A));
checkCudaErrors(cudaFreeHost(h_B));
checkCudaErrors(cudaFreeHost(h_C));
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
e415c4abb1d0a7e2041376c69304ccb65b5c0ad6.hip | // !!! This is a file automatically generated by hipify!!!
#include "common_header.h"
cudaReturnValue cublasDgemmWrapper(
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double* alpha,
const double* A, int lda,
const double* B, int ldb,
const double* beta,
double* C, int ldc) {
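// note: hipBLAS (like cuBLAS) assumes column-major storage; lda, ldb and ldc are the
// leading dimensions of A, B and C in that layout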
double* dev_A = 0;
const int dev_A_size = m * k * sizeof(double);
double* dev_B = 0;
const int dev_B_size = n * k * sizeof(double);
double* dev_C = 0;
const int dev_C_size = m * n * sizeof(double);
hipError_t cudaStatus;
double executionTime = -1.;
hipblasHandle_t handle;
hipblasStatus_t stat;
clock_t t;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)& dev_A, dev_A_size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)& dev_B, dev_B_size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)& dev_C, dev_C_size);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "CUBLAS initialization failed\n");
cudaStatus = hipErrorNotSupported;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_A, A, dev_A_size, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_B, B, dev_B_size, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_C, C, dev_C_size, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// start time measurement
t = clock();
// execute hipblasDgemm
stat = hipblasDgemm(
handle,
transa, transb,
m, n, k,
alpha,
dev_A, lda,
dev_B, ldb,
beta,
dev_C, ldc
);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipblasDgemm launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// measure time
t = clock() - t;
executionTime = ((double)t) / CLOCKS_PER_SEC;
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(C, dev_C, dev_C_size, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
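// note: the hipBLAS handle created above is never destroyed; a hipblasDestroy(handle)
// call here would avoid leaking it across repeated calls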
hipFree(dev_A);
hipFree(dev_B);
hipFree(dev_C);
return { cudaStatus, executionTime };
} | e415c4abb1d0a7e2041376c69304ccb65b5c0ad6.cu | #include "common_header.h"
cudaReturnValue cublasDgemmWrapper(
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const double* alpha,
const double* A, int lda,
const double* B, int ldb,
const double* beta,
double* C, int ldc) {
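// note: cuBLAS assumes column-major storage; lda, ldb and ldc are the
// leading dimensions of A, B and C in that layout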
double* dev_A = 0;
const int dev_A_size = m * k * sizeof(double);
double* dev_B = 0;
const int dev_B_size = n * k * sizeof(double);
double* dev_C = 0;
const int dev_C_size = m * n * sizeof(double);
cudaError_t cudaStatus;
double executionTime = -1.;
cublasHandle_t handle;
cublasStatus_t stat;
clock_t t;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)& dev_A, dev_A_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& dev_B, dev_B_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)& dev_C, dev_C_size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "CUBLAS initialization failed\n");
cudaStatus = cudaErrorNotSupported;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_A, A, dev_A_size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_B, B, dev_B_size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_C, C, dev_C_size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// start time measurement
t = clock();
// execute cublasDgemm
stat = cublasDgemm(
handle,
transa, transb,
m, n, k,
alpha,
dev_A, lda,
dev_B, ldb,
beta,
dev_C, ldc
);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cublasDgemm launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// measure time
t = clock() - t;
executionTime = ((double)t) / CLOCKS_PER_SEC;
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(C, dev_C, dev_C_size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
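// note: the cuBLAS handle created above is never destroyed; a cublasDestroy(handle)
// call here would avoid leaking it across repeated calls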
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_C);
return { cudaStatus, executionTime };
} |
a4a9fe0c3933d04c9f15c0b9166f37f26aae667f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/KmBurstAve4d.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <algorithm>
namespace faiss {
namespace gpu {
/*********************************************
Compute Ave Along 1st dim
*********************************************/
template <typename T>
__global__ void kmb_ave_kernel4d(Tensor<T, 4, true, int> tensor,
Tensor<T, 3, true, int> ave){
// height,width
int h = blockIdx.x;
int w = blockIdx.y;
// get dims to comp indices from thread
int nframes = tensor.getSize(0);
int nblocks = tensor.getSize(1);
int dim = nblocks;
T inv_nframes = 1./nframes;
// helpers
int fIdx,b;
T ave_val;
// set clusters
for (int tIdx = threadIdx.x; tIdx < dim; tIdx += blockDim.x){
b = tIdx % nblocks;
ave_val = 0;
for (int fIdx = 0; fIdx < nframes; ++fIdx){
ave_val += tensor[fIdx][b][h][w];
}
ave[b][h][w] = Math<T>::mul(ave_val,inv_nframes);
}
}
template <typename T>
void kmb_ave4d(Tensor<T, 4, true, int> tensor,
Tensor<T, 3, true, int> ave,
hipStream_t stream){
// shapes
int nframes = tensor.getSize(0);
int bBatch = tensor.getSize(1);
int hBatch = tensor.getSize(2);
int wBatch = tensor.getSize(3);
// threads
int maxThreads = (int)getMaxThreadsCurrentDevice();
int dim = bBatch;
int numThreads = ::min(dim, maxThreads);
// launch
auto grid = dim3(hBatch,wBatch);
auto block = dim3(numThreads);
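// launch layout: one thread block per (h,w) position, with threads striding over the nblocks dimension inside the kernel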
// launch kernel
hipLaunchKernelGGL(( kmb_ave_kernel4d), dim3(grid),dim3(block),0,stream, tensor,ave);
// error check
CUDA_TEST_ERROR();
}
void kmb_ave4d(Tensor<float, 4, true, int> tensor,
Tensor<float, 3, true, int> ave,
hipStream_t stream){
kmb_ave4d<float>(tensor,ave,stream);
}
void kmb_ave4d(Tensor<half, 4, true, int> tensor,
Tensor<half, 3, true, int> ave,
hipStream_t stream){
kmb_ave4d<half>(tensor,ave,stream);
}
} // namespace gpu
} // namespace faiss
| a4a9fe0c3933d04c9f15c0b9166f37f26aae667f.cu |
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/KmBurstAve4d.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/MathOperators.cuh>
#include <faiss/gpu/utils/PtxUtils.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <algorithm>
namespace faiss {
namespace gpu {
/*********************************************
Compute Ave Along 1st dim
*********************************************/
template <typename T>
__global__ void kmb_ave_kernel4d(Tensor<T, 4, true, int> tensor,
Tensor<T, 3, true, int> ave){
// height,width
int h = blockIdx.x;
int w = blockIdx.y;
// get dims to comp indices from thread
int nframes = tensor.getSize(0);
int nblocks = tensor.getSize(1);
int dim = nblocks;
T inv_nframes = 1./nframes;
// helpers
int fIdx,b;
T ave_val;
// set clusters
for (int tIdx = threadIdx.x; tIdx < dim; tIdx += blockDim.x){
b = tIdx % nblocks;
ave_val = 0;
for (int fIdx = 0; fIdx < nframes; ++fIdx){
ave_val += tensor[fIdx][b][h][w];
}
ave[b][h][w] = Math<T>::mul(ave_val,inv_nframes);
}
}
template <typename T>
void kmb_ave4d(Tensor<T, 4, true, int> tensor,
Tensor<T, 3, true, int> ave,
cudaStream_t stream){
// shapes
int nframes = tensor.getSize(0);
int bBatch = tensor.getSize(1);
int hBatch = tensor.getSize(2);
int wBatch = tensor.getSize(3);
// threads
int maxThreads = (int)getMaxThreadsCurrentDevice();
int dim = bBatch;
int numThreads = std::min(dim, maxThreads);
// launch
auto grid = dim3(hBatch,wBatch);
auto block = dim3(numThreads);
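// launch layout: one thread block per (h,w) position, with threads striding over the nblocks dimension inside the kernel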
// launch kernel
kmb_ave_kernel4d<<<grid,block,0,stream>>>(tensor,ave);
// error check
CUDA_TEST_ERROR();
}
void kmb_ave4d(Tensor<float, 4, true, int> tensor,
Tensor<float, 3, true, int> ave,
cudaStream_t stream){
kmb_ave4d<float>(tensor,ave,stream);
}
void kmb_ave4d(Tensor<half, 4, true, int> tensor,
Tensor<half, 3, true, int> ave,
cudaStream_t stream){
kmb_ave4d<half>(tensor,ave,stream);
}
} // namespace gpu
} // namespace faiss
|
106c47d066281273d2e9a33baeb3dd17335a0ed3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This experimental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "generic_ringbuf.cu.h"
#include <stdlib.h>
#include <stdio.h>
#include "util.cu.h"
__host__ void ringbuf_metadata_init(ringbuf_metadata_t** rb_cpu, ringbuf_metadata_t** rb_gpu, int num_elem)
{
ringbuf_metadata_t* rbm=(ringbuf_metadata_t*)malloc(sizeof(ringbuf_metadata_t)); // metadata in CPU
rbm->_size=num_elem;
rbm->_head=rbm->_tail=0;
(*rb_cpu)=rbm;
// metadata in CPU shared with GPU
CUDA_SAFE_CALL(hipHostRegister(rbm,sizeof(ringbuf_metadata_t),hipHostRegisterMapped));
CUDA_SAFE_CALL(hipHostGetDevicePointer((void**)rb_gpu,(void*)rbm,0));
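// the device pointer obtained here aliases the same pinned metadata block, so
// head/tail updates are visible to both host and device (zero-copy)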
}
__host__ void ringbuf_metadata_free(ringbuf_metadata_t* rb_cpu)
{
CUDA_SAFE_CALL(hipHostUnregister(rb_cpu));
free(rb_cpu);
}
| 106c47d066281273d2e9a33baeb3dd17335a0ed3.cu | /*
* This experimental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "generic_ringbuf.cu.h"
#include <stdlib.h>
#include <stdio.h>
#include "util.cu.h"
__host__ void ringbuf_metadata_init(ringbuf_metadata_t** rb_cpu, ringbuf_metadata_t** rb_gpu, int num_elem)
{
ringbuf_metadata_t* rbm=(ringbuf_metadata_t*)malloc(sizeof(ringbuf_metadata_t)); // metadata in CPU
rbm->_size=num_elem;
rbm->_head=rbm->_tail=0;
(*rb_cpu)=rbm;
// metadata in CPU shared with GPU
CUDA_SAFE_CALL(cudaHostRegister(rbm,sizeof(ringbuf_metadata_t),cudaHostRegisterMapped));
CUDA_SAFE_CALL(cudaHostGetDevicePointer((void**)rb_gpu,(void*)rbm,0));
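// the device pointer obtained here aliases the same pinned metadata block, so
// head/tail updates are visible to both host and device (zero-copy)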
}
__host__ void ringbuf_metadata_free(ringbuf_metadata_t* rb_cpu)
{
CUDA_SAFE_CALL(cudaHostUnregister(rb_cpu));
free(rb_cpu);
}
|
823f702f433f5db6370d563a62ca4285bcf6e8ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scalar.h"
//scalar and current element
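// assuming d1 is the scalar and d2 the current element (as the comment above suggests),
// this keeps the scalar whenever the element is smaller, i.e. clamps each element from below at the scalar value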
__device__ double op(double d1,double d2,double *params) {
if(d2 < d1) {
return d1;
}
return d2;
}
extern "C"
__global__ void setvalorless_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
transform(n,idx,dx,dy,incx,params,result);
}
| 823f702f433f5db6370d563a62ca4285bcf6e8ad.cu | #include "scalar.h"
//scalar and current element
__device__ double op(double d1,double d2,double *params) {
if(d2 < d1) {
return d1;
}
return d2;
}
extern "C"
__global__ void setvalorless_scalar_double(int n, int idx,double dx,double *dy,int incx,double *params,double *result) {
transform(n,idx,dx,dy,incx,params,result);
}
|
51c5e9ae668dde61faf950d080ea2c3df0759ae3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "paddle/fluid/operators/metrics/accuracy_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <int BlockSize>
__global__ void AccuracyCudaKernel(const int N, const int D,
const int64_t* Xdata,
const int64_t* labeldata, int* correct_data,
float* accuracy, int* total_data) {
int count = 0;
__shared__ int total[BlockSize];
// support only 1 block
for (int i = threadIdx.x; i < (N); i += BlockSize) {
for (int j = 0; j < D; ++j) {
if (Xdata[i * D + j] == labeldata[i]) {
++count;
break;
}
}
}
total[threadIdx.x] = count;
__syncthreads();
// reduce the count with init value 0, and output accuracy.
#ifdef PADDLE_WITH_CUDA
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
#else
// HIP's thrust::reduce does not support __device__ execution, so reduce manually in shared memory instead
for (int s = BlockSize / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
total[threadIdx.x] += total[threadIdx.x + s];
}
__syncthreads();
}
int result = total[0];
#endif
if (threadIdx.x == 0) {
*correct_data = result;
*accuracy = static_cast<float>(result) / static_cast<float>(N);
*total_data = N;
}
}
template <typename T>
class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* inference = ctx.Input<Tensor>("Out");
auto* indices = ctx.Input<Tensor>("Indices");
auto* label = ctx.Input<Tensor>("Label");
auto* accuracy = ctx.Output<Tensor>("Accuracy");
auto* correct = ctx.Output<Tensor>("Correct");
auto* total = ctx.Output<Tensor>("Total");
// FIXME(typhoonzero): only support indices currently
// if add support for output values, how to detect the data type?
const int64_t* indices_data = indices->data<int64_t>();
const int64_t* label_data = label->data<int64_t>();
int* correct_data = correct->mutable_data<int>(ctx.GetPlace());
int* total_data = total->mutable_data<int>(ctx.GetPlace());
float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());
int num_samples = static_cast<int>(inference->dims()[0]);
size_t infer_width = inference->dims()[1];
auto stream = ctx.cuda_device_context().stream();
platform::GpuMemsetAsync(accuracy_data, 0, sizeof(float), stream);
if (num_samples == 0) {
return;
}
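// a single block is launched on purpose: the kernel strides over all samples and reduces
// the per-thread counts within that one block (see "support only 1 block" above)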
hipLaunchKernelGGL(( AccuracyCudaKernel<
PADDLE_CUDA_NUM_THREADS>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
num_samples, infer_width, indices_data, label_data, correct_data,
accuracy_data, total_data);
}
};
} // namespace operators
} // namespace paddle
// FIXME(typhoonzero): types of T is for inference data.
// label data is always int64
REGISTER_OP_CUDA_KERNEL(
accuracy, paddle::operators::AccuracyOpCUDAKernel<float>,
paddle::operators::AccuracyOpCUDAKernel<double>,
paddle::operators::AccuracyOpCUDAKernel<paddle::platform::float16>);
| 51c5e9ae668dde61faf950d080ea2c3df0759ae3.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#include "paddle/fluid/operators/metrics/accuracy_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <int BlockSize>
__global__ void AccuracyCudaKernel(const int N, const int D,
const int64_t* Xdata,
const int64_t* labeldata, int* correct_data,
float* accuracy, int* total_data) {
int count = 0;
__shared__ int total[BlockSize];
// support only 1 block
for (int i = threadIdx.x; i < (N); i += BlockSize) {
for (int j = 0; j < D; ++j) {
if (Xdata[i * D + j] == labeldata[i]) {
++count;
break;
}
}
}
total[threadIdx.x] = count;
__syncthreads();
// reduce the count with init value 0, and output accuracy.
#ifdef PADDLE_WITH_CUDA
int result = thrust::reduce(thrust::device, total, total + BlockSize, 0);
#else
// HIP's thrust::reduce does not support __device__ execution, so reduce manually in shared memory instead
for (int s = BlockSize / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
total[threadIdx.x] += total[threadIdx.x + s];
}
__syncthreads();
}
int result = total[0];
#endif
if (threadIdx.x == 0) {
*correct_data = result;
*accuracy = static_cast<float>(result) / static_cast<float>(N);
*total_data = N;
}
}
template <typename T>
class AccuracyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* inference = ctx.Input<Tensor>("Out");
auto* indices = ctx.Input<Tensor>("Indices");
auto* label = ctx.Input<Tensor>("Label");
auto* accuracy = ctx.Output<Tensor>("Accuracy");
auto* correct = ctx.Output<Tensor>("Correct");
auto* total = ctx.Output<Tensor>("Total");
// FIXME(typhoonzero): only support indices currently
// if add support for output values, how to detect the data type?
const int64_t* indices_data = indices->data<int64_t>();
const int64_t* label_data = label->data<int64_t>();
int* correct_data = correct->mutable_data<int>(ctx.GetPlace());
int* total_data = total->mutable_data<int>(ctx.GetPlace());
float* accuracy_data = accuracy->mutable_data<float>(ctx.GetPlace());
int num_samples = static_cast<int>(inference->dims()[0]);
size_t infer_width = inference->dims()[1];
auto stream = ctx.cuda_device_context().stream();
platform::GpuMemsetAsync(accuracy_data, 0, sizeof(float), stream);
if (num_samples == 0) {
return;
}
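// a single block is launched on purpose: the kernel strides over all samples and reduces
// the per-thread counts within that one block (see "support only 1 block" above)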
AccuracyCudaKernel<
PADDLE_CUDA_NUM_THREADS><<<1, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
num_samples, infer_width, indices_data, label_data, correct_data,
accuracy_data, total_data);
}
};
} // namespace operators
} // namespace paddle
// FIXME(typhoonzero): types of T is for inference data.
// label data is always int64
REGISTER_OP_CUDA_KERNEL(
accuracy, paddle::operators::AccuracyOpCUDAKernel<float>,
paddle::operators::AccuracyOpCUDAKernel<double>,
paddle::operators::AccuracyOpCUDAKernel<paddle::platform::float16>);
|
f102da3da30b80c917d9be095ec4a0fcbf21f53f.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
/* reference:
C:\ProgramData\NVIDIA Corporation\CUDA Samples\v8.0\1_Utilities\deviceQuery
*/
int get_device_info()
{
int device_count{ 0 };
// hipGetDeviceCount: get the number of compute-capable devices
hipGetDeviceCount(&device_count);
fprintf(stdout, "GPU %d\n", device_count);
for (int dev = 0; dev < device_count; ++dev) {
int driver_version{ 0 }, runtime_version{ 0 };
/* hipSetDevice: select the GPU used for execution; 0 is the first
device found, and with multiple devices the IDs are 0,1,2... */
hipSetDevice(dev);
/* hipDeviceProp_t: device property structure
name: device name, e.g. GeForce 940MX
totalGlobalMem: total global memory available on the device (bytes)
sharedMemPerBlock: shared memory available per thread block (bytes)
regsPerBlock: number of 32-bit registers available per thread block
warpSize: number of threads in a warp; at run time a thread block is split into
warps, and every thread in a warp executes the same instruction on different data
memPitch: maximum pitch allowed for memory copies (bytes)
maxThreadsPerBlock: maximum number of threads per thread block
maxThreadsDim[3]: maximum size of each block dimension (x,y,z)
maxGridSize: maximum size of each grid dimension (x,y,z)
clockRate: maximum GPU clock rate (kHz)
totalConstMem: total constant memory available on the device (bytes)
major: major compute-capability version; the compute capability describes the level of CUDA support of the GPU
minor: minor compute-capability version
textureAlignment: texture alignment requirement
deviceOverlap: whether the GPU supports Device Overlap, i.e. copying between host and
device while a kernel is executing; deprecated, use asyncEngineCount instead
multiProcessorCount: number of multiprocessors on the device
kernelExecTimeoutEnabled: whether kernel execution has a run-time limit
integrated: whether the device is an integrated GPU
canMapHostMemory: whether the device can map host memory (a test for zero-copy support)
computeMode: CUDA compute mode, see cudaComputeMode
maxTexture1D: maximum 1D texture size
maxTexture2D[2]: maximum 2D texture size (x,y)
maxTexture3D[3]: maximum 3D texture size (x,y,z)
memoryClockRate: peak memory clock rate (kHz)
memoryBusWidth: global memory bus width (bits)
l2CacheSize: L2 cache size (bytes)
maxThreadsPerMultiProcessor: maximum number of threads per multiprocessor
concurrentKernels: whether the device can execute multiple kernels concurrently
asyncEngineCount: number of asynchronous engines
unifiedAddressing: whether the device shares a unified address space with the host
*/
hipDeviceProp_t device_prop;
/* hipGetDeviceProperties: query the properties of the specified GPU device */
hipGetDeviceProperties(&device_prop, dev);
fprintf(stdout, "\n %d : %s\n", dev, device_prop.name);
/* hipDriverGetVersion: CUDA */
hipDriverGetVersion(&driver_version);
fprintf(stdout, "CUDA %d.%d\n", driver_version/1000, (driver_version%1000)/10);
/* hipRuntimeGetVersion: CUDA */
hipRuntimeGetVersion(&runtime_version);
fprintf(stdout, "CUDA %d.%d\n", runtime_version/1000, (runtime_version%1000)/10);
fprintf(stdout, " %d.%d\n", device_prop.major, device_prop.minor);
fprintf(stdout, " %f MB, %llu bytes\n",
(float)device_prop.totalGlobalMem / (1024 * 1024), (unsigned long long)device_prop.totalGlobalMem);
fprintf(stdout, " %f KB, %lu bytes\n",
(float)device_prop.sharedMemPerBlock / 1024, device_prop.sharedMemPerBlock);
fprintf(stdout, "32: %d\n", device_prop.regsPerBlock);
fprintf(stdout, " %d\n", device_prop.warpSize);
fprintf(stdout, "pitch: %d bytes\n", device_prop.memPitch);
fprintf(stdout, ": %d\n", device_prop.maxThreadsPerBlock);
fprintf(stdout, "(x,y,z): (%d, %d, %d)\n",
device_prop.maxThreadsDim[0], device_prop.maxThreadsDim[1], device_prop.maxThreadsDim[2]);
fprintf(stdout, "(x,y,z): (%d, %d, %d)\n",
device_prop.maxGridSize[0], device_prop.maxGridSize[1], device_prop.maxGridSize[2]);
fprintf(stdout, "GPU: %.0f MHz (%0.2f GHz)\n",
device_prop.clockRate*1e-3f, device_prop.clockRate*1e-6f);
fprintf(stdout, ": %lu bytes\n", device_prop.totalConstMem);
fprintf(stdout, ": %lu bytes\n", device_prop.textureAlignment);
fprintf(stdout, ": %s\n", device_prop.deviceOverlap ? "Yes" : "No");
fprintf(stdout, ": %d\n", device_prop.multiProcessorCount);
fprintf(stdout, ": %s\n", device_prop.kernelExecTimeoutEnabled ? "Yes" : "No");
fprintf(stdout, "GPU: %s\n", device_prop.integrated ? "Yes" : "No");
fprintf(stdout, ": %s\n", device_prop.canMapHostMemory ? "Yes" : "No");
fprintf(stdout, "CUDA: %d\n", device_prop.computeMode);
fprintf(stdout, ": %d\n", device_prop.maxTexture1D);
fprintf(stdout, "(x,y): (%d, %d)\n", device_prop.maxTexture2D[0], device_prop.maxSurface2D[1]);
fprintf(stdout, "(x,y,z): (%d, %d, %d)\n",
device_prop.maxTexture3D[0], device_prop.maxSurface3D[1], device_prop.maxSurface3D[2]);
fprintf(stdout, ": %.0f Mhz\n", device_prop.memoryClockRate * 1e-3f);
fprintf(stdout, ": %d bits\n", device_prop.memoryBusWidth);
fprintf(stdout, "L2: %d bytes\n", device_prop.l2CacheSize);
fprintf(stdout, ": %d\n", device_prop.maxThreadsPerMultiProcessor);
fprintf(stdout, ": %s\n", device_prop.concurrentKernels ? "Yes" : "No");
fprintf(stdout, ": %d\n", device_prop.asyncEngineCount);
fprintf(stdout, ": %s\n", device_prop.unifiedAddressing ? "Yes" : "No");
}
return 0;
}
| f102da3da30b80c917d9be095ec4a0fcbf21f53f.cu | #include "funset.hpp"
#include <iostream>
#include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
/* reference:
C:\ProgramData\NVIDIA Corporation\CUDA Samples\v8.0\1_Utilities\deviceQuery
*/
int get_device_info()
{
int device_count{ 0 };
// cudaGetDeviceCount: get the number of compute-capable devices
cudaGetDeviceCount(&device_count);
fprintf(stdout, "GPU设备的数量: %d\n", device_count);
for (int dev = 0; dev < device_count; ++dev) {
int driver_version{ 0 }, runtime_version{ 0 };
/* cudaSetDevice: select the GPU used for execution; 0 is the first
device found, and with multiple devices the IDs are 0,1,2... */
cudaSetDevice(dev);
/* cudaDeviceProp: device property structure
name: device name, e.g. GeForce 940MX
totalGlobalMem: total global memory available on the device (bytes)
sharedMemPerBlock: shared memory available per thread block (bytes)
regsPerBlock: number of 32-bit registers available per thread block
warpSize: number of threads in a warp; at run time a thread block is split into
warps, and every thread in a warp executes the same instruction on different data
memPitch: maximum pitch allowed for memory copies (bytes)
maxThreadsPerBlock: maximum number of threads per thread block
maxThreadsDim[3]: maximum size of each block dimension (x,y,z)
maxGridSize: maximum size of each grid dimension (x,y,z)
clockRate: maximum GPU clock rate (kHz)
totalConstMem: total constant memory available on the device (bytes)
major: major compute-capability version; the compute capability describes the level of CUDA support of the GPU
minor: minor compute-capability version
textureAlignment: texture alignment requirement
deviceOverlap: whether the GPU supports Device Overlap, i.e. copying between host and
device while a kernel is executing; deprecated, use asyncEngineCount instead
multiProcessorCount: number of multiprocessors on the device
kernelExecTimeoutEnabled: whether kernel execution has a run-time limit
integrated: whether the device is an integrated GPU
canMapHostMemory: whether the device can map host memory (a test for zero-copy support)
computeMode: CUDA compute mode, see cudaComputeMode
maxTexture1D: maximum 1D texture size
maxTexture2D[2]: maximum 2D texture size (x,y)
maxTexture3D[3]: maximum 3D texture size (x,y,z)
memoryClockRate: peak memory clock rate (kHz)
memoryBusWidth: global memory bus width (bits)
l2CacheSize: L2 cache size (bytes)
maxThreadsPerMultiProcessor: maximum number of threads per multiprocessor
concurrentKernels: whether the device can execute multiple kernels concurrently
asyncEngineCount: number of asynchronous engines
unifiedAddressing: whether the device shares a unified address space with the host
*/
cudaDeviceProp device_prop;
/* cudaGetDeviceProperties: query the properties of the specified GPU device */
cudaGetDeviceProperties(&device_prop, dev);
fprintf(stdout, "\n设备 %d 名字: %s\n", dev, device_prop.name);
/* cudaDriverGetVersion: 获取CUDA驱动版本 */
cudaDriverGetVersion(&driver_version);
fprintf(stdout, "CUDA驱动版本: %d.%d\n", driver_version/1000, (driver_version%1000)/10);
/* cudaRuntimeGetVersion: 获取CUDA运行时版本 */
cudaRuntimeGetVersion(&runtime_version);
fprintf(stdout, "CUDA运行时版本: %d.%d\n", runtime_version/1000, (runtime_version%1000)/10);
fprintf(stdout, "设备计算能力: %d.%d\n", device_prop.major, device_prop.minor);
fprintf(stdout, "设备上可用的全局内存总量: %f MB, %llu bytes\n",
(float)device_prop.totalGlobalMem / (1024 * 1024), (unsigned long long)device_prop.totalGlobalMem);
fprintf(stdout, "每一个线程块上可用的共享内存总量: %f KB, %lu bytes\n",
(float)device_prop.sharedMemPerBlock / 1024, device_prop.sharedMemPerBlock);
fprintf(stdout, "每一个线程块上可用的32位寄存器数量: %d\n", device_prop.regsPerBlock);
fprintf(stdout, "一个线程束包含的线程数量: %d\n", device_prop.warpSize);
fprintf(stdout, "在内存拷贝中允许的最大pitch数: %d bytes\n", device_prop.memPitch);
fprintf(stdout, "每一个线程块中支持的最大线程数量: %d\n", device_prop.maxThreadsPerBlock);
fprintf(stdout, "每一个线程块的每个维度的最大大小(x,y,z): (%d, %d, %d)\n",
device_prop.maxThreadsDim[0], device_prop.maxThreadsDim[1], device_prop.maxThreadsDim[2]);
fprintf(stdout, "每一个线程格的每个维度的最大大小(x,y,z): (%d, %d, %d)\n",
device_prop.maxGridSize[0], device_prop.maxGridSize[1], device_prop.maxGridSize[2]);
fprintf(stdout, "GPU最大时钟频率: %.0f MHz (%0.2f GHz)\n",
device_prop.clockRate*1e-3f, device_prop.clockRate*1e-6f);
fprintf(stdout, "设备上可用的常量内存总量: %lu bytes\n", device_prop.totalConstMem);
fprintf(stdout, "纹理对齐要求: %lu bytes\n", device_prop.textureAlignment);
fprintf(stdout, "是否支持设备重叠功能: %s\n", device_prop.deviceOverlap ? "Yes" : "No");
fprintf(stdout, "设备上多处理器的数量: %d\n", device_prop.multiProcessorCount);
fprintf(stdout, "执行核函数时是否有运行时间限制: %s\n", device_prop.kernelExecTimeoutEnabled ? "Yes" : "No");
fprintf(stdout, "设备是否是一个集成GPU: %s\n", device_prop.integrated ? "Yes" : "No");
fprintf(stdout, "设备是否支持映射主机内存: %s\n", device_prop.canMapHostMemory ? "Yes" : "No");
fprintf(stdout, "CUDA设备计算模式: %d\n", device_prop.computeMode);
fprintf(stdout, "一维纹理支持的最大大小: %d\n", device_prop.maxTexture1D);
fprintf(stdout, "二维纹理支持的最大大小(x,y): (%d, %d)\n", device_prop.maxTexture2D[0], device_prop.maxSurface2D[1]);
fprintf(stdout, "三维纹理支持的最大大小(x,y,z): (%d, %d, %d)\n",
device_prop.maxTexture3D[0], device_prop.maxSurface3D[1], device_prop.maxSurface3D[2]);
fprintf(stdout, "内存时钟频率峰值: %.0f Mhz\n", device_prop.memoryClockRate * 1e-3f);
fprintf(stdout, "全局内存总线宽度: %d bits\n", device_prop.memoryBusWidth);
fprintf(stdout, "L2缓存大小: %d bytes\n", device_prop.l2CacheSize);
fprintf(stdout, "每个多处理器支持的最大线程数量: %d\n", device_prop.maxThreadsPerMultiProcessor);
fprintf(stdout, "设备是否支持同时执行多个核函数: %s\n", device_prop.concurrentKernels ? "Yes" : "No");
fprintf(stdout, "异步引擎数量: %d\n", device_prop.asyncEngineCount);
fprintf(stdout, "是否支持设备与主机共享一个统一的地址空间: %s\n", device_prop.unifiedAddressing ? "Yes" : "No");
}
return 0;
}
|
935c7e10be9154eb19693c3d70286d17a300d81a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ctime>
// store data of image
unsigned char *data_rgb_image;
// cuda kernel
__global__ void RGBtoGrey(unsigned char *rgb, unsigned char *grey, int _row, int _col) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
// calculate for only in the GridDim
if (col < _col && row < _row) {
// calculate Global threadID
int grey_offset = row * _col + col;
// 3 mean channel (R G B)
int rgb_offset = grey_offset * 3;
// use unsigned char because its range is 0 - 255, which matches the color range (0-255)
unsigned char r = rgb[rgb_offset + 0]; // 0 for red
unsigned char g = rgb[rgb_offset + 1]; // 1 for green
unsigned char b = rgb[rgb_offset + 2]; // 2 for blue
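// note: cv::imread loads color images in BGR channel order, so offset 0 is actually blue
// and offset 2 is red; swap the weights below if an exact luminance conversion is needed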
grey[grey_offset] = r * 0.21f + g * 0.71f + b * 0.07f;
}
}
__global__ void GreytoSobel(unsigned char *gray, unsigned char *sobelX, unsigned char *sobelY, int _row, int _col) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if ( col< _col && row < _row) {
// calculate Global threadID
int sobel_offset = row * _col + col;
int margin[3][3];
// Gx for mask of horizontal
int Gx[3][3] = {{1,0,-1},{2,0,-2},{1,0,-1}};
// Gy for mask of vertical
int Gy[3][3] = {{1,2,1},{0,0,0},{-1,-2,-1}};
//calculate margin for 3 x 3 matrix (neighbor of pixel)
margin[0][0] = (row-1) * _col + col - 1;
margin[0][1] = (row-1) * _col + col;
margin[0][2] = (row-1) * _col + col + 1;
margin[1][0] = row * _col + col - 1;
margin[1][1] = row * _col + col;
margin[1][2] = row * _col + col + 1;
margin[2][0] = (row+1) * _col + col - 1;
margin[2][1] = (row+1) * _col + col;
margin[2][2] = (row+1) * _col + col + 1;
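// note: for pixels on the image border these neighbour indices fall outside the buffer;
// clamping them (or skipping the outermost rows/columns) would be needed for strictly correct results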
int sumX = 0, sumY = 0;
// calculate the Sobel responses
for(int i = 0; i < 3; i ++){
for(int j = 0; j < 3; j++){
sumX += gray[margin[i][j]] * Gx[i][j];
sumY += gray[margin[i][j]] * Gy[i][j];
}
}
if(sumX >= 255){
sumX = 255;
}
if(sumY >= 255){
sumY = 255;
}
if(sumX <= 0){
sumX = 0;
}
if(sumY <= 0){
sumY = 0;
}
// printf("%d\n", sumX);
sobelX[sobel_offset] = sumX;
sobelY[sobel_offset] = sumY;
}
}
//function for loading an image into rgb format unsigned char array
size_t loadImgFile(unsigned char *grey_image, const std::string &input_file, int *rows, int *cols) {
cv::Mat img_data; //opencv Mat object
//read image data into img_data Mat object
img_data = cv::imread(input_file.c_str(), cv::IMREAD_ANYCOLOR | cv::IMREAD_ANYDEPTH);
if (img_data.empty()) {
std::cerr << "Unable to laod image file: " << input_file << std::endl;
}
*rows = img_data.rows;
*cols = img_data.cols;
//allocate memory for host rgb data array
data_rgb_image = (unsigned char*) malloc(*rows * *cols * sizeof(unsigned char) * 3);
unsigned char* rgb_image = (unsigned char*)img_data.data;
//populate host's rgb data array
int x = 0;
for (x = 0; x < *rows * *cols * 3; x++) {
data_rgb_image[x] = rgb_image[x];
}
// return size of image in pixel
return img_data.rows * img_data.cols;
}
// method for writing image file of gray scale
void outputImg(const std::string& output_file, unsigned char* grey_image, int _row, int _col) {
//serialize gray data array into opencv's Mat object
cv::Mat greyData(_row, _col, CV_8UC1,(void *) grey_image);
//write Mat object to file
cv::imwrite(output_file.c_str(), greyData);
printf("Transfer complete\n");
}
// size_t (unsigned integer)
size_t loadImgFile(unsigned char *grey_img, const std::string &input_file, int *_row, int *_col );
void outputImg(const std::string &output_file, unsigned char *grey_image, int _row, int _col);
int main(int argc, char **argv) {
clock_t begin = clock();
std::string input_file;
std::string gray_file;
std::string horizontal_sobel_file;
std::string vertical_sobel_file;
//Check for the input file and output file names
switch(argc) {
case 5:
input_file = std::string(argv[1]);
gray_file = std::string(argv[2]);
horizontal_sobel_file = std::string(argv[3]);
vertical_sobel_file = std::string(argv[4]);
break;
default:
std::cerr << "Usage: <executable> input_file gray_file horizontal_sobel_file vertical_sobel_file\n";
exit(1);
}
unsigned char *d_rgb_image; //array for storing rgb data on device
unsigned char *data_grey_image = 0;
unsigned char *data_sobel_x_image = 0;
unsigned char *data_sobel_y_image = 0;
unsigned char *d_grey_image = 0; //host and device's grey data array pointers
unsigned char *d_sobel_x = 0;
unsigned char *d_sobel_y = 0;
int p_rows; //number of rows of pixels
int p_cols; //number of columns of pixels
//load image into an array and retrieve number of pixels
const size_t total_pixels = loadImgFile(data_grey_image, input_file, &p_rows, &p_cols);
//allocate memory of host's grey data array
data_grey_image = (unsigned char *)malloc(sizeof(unsigned char*)* total_pixels);
data_sobel_x_image = (unsigned char *)malloc(sizeof(unsigned char*)* total_pixels);
data_sobel_y_image = (unsigned char *)malloc(sizeof(unsigned char*)* total_pixels);
//allocate and initialize memory on device
hipMalloc(&d_rgb_image, sizeof(unsigned char) * total_pixels * 3); // 3 is for channel (R G B)
hipMalloc(&d_grey_image, sizeof(unsigned char) * total_pixels);
hipMalloc(&d_sobel_x, sizeof(unsigned char) * total_pixels);
hipMalloc(&d_sobel_y, sizeof(unsigned char) * total_pixels);
hipMemset(d_grey_image, 0, sizeof(unsigned char) * total_pixels);
//copy host rgb data array to device data array
hipMemcpy(d_rgb_image, data_rgb_image, sizeof(unsigned char) * total_pixels * 3, hipMemcpyHostToDevice);
hipMemcpy(d_sobel_x, data_sobel_x_image, sizeof(unsigned char) * total_pixels, hipMemcpyHostToDevice);
hipMemcpy(d_sobel_y, data_sobel_y_image, sizeof(unsigned char) * total_pixels, hipMemcpyHostToDevice);
//define block and grid dimensions
// 64x64 = 4096 threads per block exceeds the CUDA limit of 1024, and the integer
// division inside ceil() truncates; use 16x16 blocks and round the grid size up
const dim3 dimBlock(16, 16);
const dim3 dimGrid((p_cols + 15) / 16, (p_rows + 15) / 16);
//execute cuda kernel
hipLaunchKernelGGL(( RGBtoGrey), dim3(dimGrid), dim3(dimBlock), 0, 0, d_rgb_image, d_grey_image, p_rows, p_cols);
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; // convert clock ticks to seconds
fflush(stdout);
// printf("Time used: %ls",elapsed_secs);
// printf("Error %d: %s.\n", id, errors[id]);
// std::cout << "Error " << id << ": " << errors[id] << "." << std::endl;
std::cout <<"Time used: " << elapsed_secs << std::endl;
hipLaunchKernelGGL(( GreytoSobel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_grey_image,d_sobel_x, d_sobel_y, p_rows, p_cols);
//copy computed gray data array from device to host
hipMemcpy(data_grey_image, d_grey_image, sizeof(unsigned char) * total_pixels, hipMemcpyDeviceToHost);
hipMemcpy(data_sobel_x_image, d_sobel_x, sizeof(unsigned char) * total_pixels, hipMemcpyDeviceToHost);
hipMemcpy(data_sobel_y_image, d_sobel_y, sizeof(unsigned char) * total_pixels, hipMemcpyDeviceToHost);
//output the grayscale image
outputImg(gray_file, data_grey_image, p_rows, p_cols);
outputImg(horizontal_sobel_file, data_sobel_x_image, p_rows, p_cols);
outputImg(vertical_sobel_file, data_sobel_y_image, p_rows, p_cols);
hipFree(d_rgb_image);
hipFree(d_grey_image);
hipFree(d_sobel_x);
hipFree(d_sobel_y);
// clock_t end = clock();
// double elapsed_secs = double(end - begin);
// fflush(stdout);
// // printf("Time used: %ls",elapsed_secs);
// // printf("Error %d: %s.\n", id, errors[id]);
// // std::cout << "Error " << id << ": " << errors[id] << "." << std::endl;
// std::cout <<"Time used: " << elapsed_secs << std::endl;
return 0;
}
| 935c7e10be9154eb19693c3d70286d17a300d81a.cu | #include <stdio.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <ctime>
// store data of image
unsigned char *data_rgb_image;
// cuda kernel
__global__ void RGBtoGrey(unsigned char *rgb, unsigned char *grey, int _row, int _col) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
// calculate for only in the GridDim
if (col < _col && row < _row) {
// calculate Global threadID
int grey_offset = row * _col + col;
// 3 mean channel (R G B)
int rgb_offset = grey_offset * 3;
// use unsigned char because its range is 0 - 255, which matches the color range (0-255)
unsigned char r = rgb[rgb_offset + 0]; // 0 for red
unsigned char g = rgb[rgb_offset + 1]; // 1 for green
unsigned char b = rgb[rgb_offset + 2]; // 2 for blue
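// note: cv::imread loads color images in BGR channel order, so offset 0 is actually blue
// and offset 2 is red; swap the weights below if an exact luminance conversion is needed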
grey[grey_offset] = r * 0.21f + g * 0.71f + b * 0.07f;
}
}
__global__ void GreytoSobel(unsigned char *gray, unsigned char *sobelX, unsigned char *sobelY, int _row, int _col) {
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if ( col< _col && row < _row) {
// calculate Global threadID
int sobel_offset = row * _col + col;
int margin[3][3];
// Gx for mask of horizontal
int Gx[3][3] = {{1,0,-1},{2,0,-2},{1,0,-1}};
// Gy for mask of vertical
int Gy[3][3] = {{1,2,1},{0,0,0},{-1,-2,-1}};
//calculate margin for 3 x 3 matrix (neighbor of pixel)
margin[0][0] = (row-1) * _col + col - 1;
margin[0][1] = (row-1) * _col + col;
margin[0][2] = (row-1) * _col + col + 1;
margin[1][0] = row * _col + col - 1;
margin[1][1] = row * _col + col;
margin[1][2] = row * _col + col + 1;
margin[2][0] = (row+1) * _col + col - 1;
margin[2][1] = (row+1) * _col + col;
margin[2][2] = (row+1) * _col + col + 1;
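// note: for pixels on the image border these neighbour indices fall outside the buffer;
// clamping them (or skipping the outermost rows/columns) would be needed for strictly correct results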
int sumX = 0, sumY = 0;
// calculate the Sobel responses
for(int i = 0; i < 3; i ++){
for(int j = 0; j < 3; j++){
sumX += gray[margin[i][j]] * Gx[i][j];
sumY += gray[margin[i][j]] * Gy[i][j];
}
}
if(sumX >= 255){
sumX = 255;
}
if(sumY >= 255){
sumY = 255;
}
if(sumX <= 0){
sumX = 0;
}
if(sumY <= 0){
sumY = 0;
}
// printf("%d\n", sumX);
sobelX[sobel_offset] = sumX;
sobelY[sobel_offset] = sumY;
}
}
//function for loading an image into rgb format unsigned char array
size_t loadImgFile(unsigned char *grey_image, const std::string &input_file, int *rows, int *cols) {
cv::Mat img_data; //opencv Mat object
//read image data into img_data Mat object
img_data = cv::imread(input_file.c_str(), cv::IMREAD_ANYCOLOR | cv::IMREAD_ANYDEPTH);
if (img_data.empty()) {
std::cerr << "Unable to laod image file: " << input_file << std::endl;
}
*rows = img_data.rows;
*cols = img_data.cols;
//allocate memory for host rgb data array
data_rgb_image = (unsigned char*) malloc(*rows * *cols * sizeof(unsigned char) * 3);
unsigned char* rgb_image = (unsigned char*)img_data.data;
//populate host's rgb data array
int x = 0;
for (x = 0; x < *rows * *cols * 3; x++) {
data_rgb_image[x] = rgb_image[x];
}
// return size of image in pixel
return img_data.rows * img_data.cols;
}
// method for writing image file of gray scale
void outputImg(const std::string& output_file, unsigned char* grey_image, int _row, int _col) {
//serialize gray data array into opencv's Mat object
cv::Mat greyData(_row, _col, CV_8UC1,(void *) grey_image);
//write Mat object to file
cv::imwrite(output_file.c_str(), greyData);
printf("Transfer complete\n");
}
// size_t (unsigned integer)
size_t loadImgFile(unsigned char *grey_img, const std::string &input_file, int *_row, int *_col );
void outputImg(const std::string &output_file, unsigned char *grey_image, int _row, int _col);
int main(int argc, char **argv) {
clock_t begin = clock();
std::string input_file;
std::string gray_file;
std::string horizontal_sobel_file;
std::string vertical_sobel_file;
//Check for the input file and output file names
switch(argc) {
case 5:
input_file = std::string(argv[1]);
gray_file = std::string(argv[2]);
horizontal_sobel_file = std::string(argv[3]);
vertical_sobel_file = std::string(argv[4]);
break;
default:
std::cerr << "Usage: <executable> input_file gray_file horizontal_sobel_file vertical_sobel_file\n";
exit(1);
}
unsigned char *d_rgb_image; //array for storing rgb data on device
unsigned char *data_grey_image = 0;
unsigned char *data_sobel_x_image = 0;
unsigned char *data_sobel_y_image = 0;
unsigned char *d_grey_image = 0; //host and device's grey data array pointers
unsigned char *d_sobel_x = 0;
unsigned char *d_sobel_y = 0;
int p_rows; //number of rows of pixels
int p_cols; //number of columns of pixels
//load image into an array and retrieve number of pixels
const size_t total_pixels = loadImgFile(data_grey_image, input_file, &p_rows, &p_cols);
//allocate memory of host's grey data array
data_grey_image = (unsigned char *)malloc(sizeof(unsigned char*)* total_pixels);
data_sobel_x_image = (unsigned char *)malloc(sizeof(unsigned char*)* total_pixels);
data_sobel_y_image = (unsigned char *)malloc(sizeof(unsigned char*)* total_pixels);
//allocate and initialize memory on device
cudaMalloc(&d_rgb_image, sizeof(unsigned char) * total_pixels * 3); // 3 is for channel (R G B)
cudaMalloc(&d_grey_image, sizeof(unsigned char) * total_pixels);
cudaMalloc(&d_sobel_x, sizeof(unsigned char) * total_pixels);
cudaMalloc(&d_sobel_y, sizeof(unsigned char) * total_pixels);
cudaMemset(d_grey_image, 0, sizeof(unsigned char) * total_pixels);
//copy host rgb data array to device data array
cudaMemcpy(d_rgb_image, data_rgb_image, sizeof(unsigned char) * total_pixels * 3, cudaMemcpyHostToDevice);
cudaMemcpy(d_sobel_x, data_sobel_x_image, sizeof(unsigned char) * total_pixels, cudaMemcpyHostToDevice);
cudaMemcpy(d_sobel_y, data_sobel_y_image, sizeof(unsigned char) * total_pixels, cudaMemcpyHostToDevice);
//define block and grid dimensions
// 64x64 = 4096 threads per block exceeds the CUDA limit of 1024, and the integer
// division inside ceil() truncates; use 16x16 blocks and round the grid size up
const dim3 dimBlock(16, 16);
const dim3 dimGrid((p_cols + 15) / 16, (p_rows + 15) / 16);
//execute cuda kernel
RGBtoGrey<<<dimGrid, dimBlock>>>(d_rgb_image, d_grey_image, p_rows, p_cols);
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; // convert clock ticks to seconds
fflush(stdout);
// printf("Time used: %ls",elapsed_secs);
// printf("Error %d: %s.\n", id, errors[id]);
// std::cout << "Error " << id << ": " << errors[id] << "." << std::endl;
std::cout <<"Time used: " << elapsed_secs << std::endl;
GreytoSobel<<<dimGrid, dimBlock>>>(d_grey_image,d_sobel_x, d_sobel_y, p_rows, p_cols);
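// the cudaMemcpy calls below run on the default stream, so they implicitly wait for
// both kernels to finish before the results are copied back to the host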
//copy computed gray data array from device to host
cudaMemcpy(data_grey_image, d_grey_image, sizeof(unsigned char) * total_pixels, cudaMemcpyDeviceToHost);
cudaMemcpy(data_sobel_x_image, d_sobel_x, sizeof(unsigned char) * total_pixels, cudaMemcpyDeviceToHost);
cudaMemcpy(data_sobel_y_image, d_sobel_y, sizeof(unsigned char) * total_pixels, cudaMemcpyDeviceToHost);
//output the grayscale image
outputImg(gray_file, data_grey_image, p_rows, p_cols);
outputImg(horizontal_sobel_file, data_sobel_x_image, p_rows, p_cols);
outputImg(vertical_sobel_file, data_sobel_y_image, p_rows, p_cols);
cudaFree(d_rgb_image);
cudaFree(d_grey_image);
cudaFree(d_sobel_x);
cudaFree(d_sobel_y);
return 0;
}
|
175f7c49bbf8a69b2d7a3539d7b65835e4970705.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_helper.h"
#include "cs_analysis.h"
#include "cs_header.h"
#include "cs_block.h"
#include "cs_perm_mlseq.h"
#include "cs_expand.h"
#include "cs_interpolate.h"
#include "cs_perm_selection.h"
#include "cs_copy_box.h"
#include "cs_motion_detect.h"
#include "cs_motion_detect_v2.h"
// #include "cs_edge_detect.h"
#include "cs_edge_detect_v2.h"
#include "cs_ipcam.h"
#define CUDA_DBG
int *dp1 = NULL, *dp2 = NULL ;
int *hp1 = NULL, *hp2 = NULL ;
#define NUM_OF_HVT_INDEX 3
#define BUF_SIZE ( 1024 * 1024 )
#define BUF_SIZE_INT ( BUF_SIZE * sizeof (int))
struct cs_xyz hcube[ CUBE_INFO_CNT ], *dcubep ;
struct cube cubecube[ CUBE_INFO_CNT ] ;
int
main( int ac, char *av[] )
{
#ifdef CUDA_OBS
int orig, rec_size, hvt_size;
#endif
int k, i, *dp ;
setbuf( stdout, NULL ) ;
setbuf( stderr, NULL ) ;
if (( k = hipMalloc( &dcubep, sizeof ( cube ))) != hipSuccess )
{
printf("%s: d_cube alloc failed %d \n", __func__, k ) ;
exit ( 0 ) ;
}
if (( k = hipMalloc( &dp1, BUF_SIZE_INT )) != hipSuccess )
{
printf("%s: cube alloc failed %d \n", __func__, k ) ;
exit ( 0 ) ;
}
if (( k = hipMalloc( &dp2, BUF_SIZE_INT )) != hipSuccess )
{
printf("%s: cube alloc failed %d \n", __func__, k ) ;
exit ( 0 ) ;
}
#ifdef CUDA_OBS
// testing of the ipcam stuff
ipcam_init ( 5, 640, 480 ) ;
exit(2) ;
#endif
dbg_init( 1024 * 1024 ) ;
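// the block below fills in three cube levels (x/y/z block dimensions) on the host;
// h_set_config presumably mirrors them into the device-side buffer pointed to by dcubep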
cubecube[0].x = hcube[0].x = 12 ;
cubecube[0].y = hcube[0].y = 14 ;
cubecube[0].z = hcube[0].z = 3 ;
cubecube[1].x = hcube[1].x = 10 ;
cubecube[1].y = hcube[1].y = 8 ;
cubecube[1].z = hcube[1].z = 3 ;
cubecube[2].x = hcube[2].x = 8 ;
cubecube[2].y = hcube[2].y = 6 ;
cubecube[2].z = hcube[2].z = 4 ;
h_set_config( dcubep, cubecube ) ;
#ifdef CUDA_OBS
dbg_p_d_data_i ( "cube 1", ( int *)dcubep, sizeof( hcube ) / sizeof ( int)) ;
#endif
hp1 = ( int * )malloc ( BUF_SIZE_INT ) ;
hp2 = ( int * )malloc ( BUF_SIZE_INT ) ;
dp = hp1 ;
for ( i = 0 ; i < BUF_SIZE ; i++ )
{
*dp++ = rand() & 0xff ;
#ifdef CUDA_OBS
*dp++ = i ;
*dp++ = rand() & 0xff ;
if ( i & 1 )
*dp++ = i+100 ;
else
*dp++ = i-100 ;
#endif
}
set_device_mem_i( dp2, BUF_SIZE, 111 ) ;
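// dp2 is pre-filled with the constant 111 (presumably so untouched regions stand out in
// the debug dumps); dp1 receives the random host buffer via the copy below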
if (( i = hipMemcpy( dp1, hp1, BUF_SIZE_INT,
hipMemcpyHostToDevice)) != hipSuccess )
{
printf("%s: cp failed %d\n", __func__, i ) ;
exit( 0 ) ;
}
#ifdef CUDA_DBG
dbg_p_d_data_i ( "dp1 original", dp1, 25 ) ;
#endif
#ifdef CUDA_OBS
// test of dbg_p_d_data_i_mn_v2
dbg_p_d_data_i_mn_v2( "dp1 init", dp1, 12 * 14 * 3 * 3 * 3, 12,
hcube, 3 ,3 ) ;
#endif
// do the test here ..
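// each #ifdef CUDA_OBS section below is an independent test that is currently compiled
// out (only CUDA_DBG is defined above)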
#ifdef CUDA_OBS
// blocking
// not tested ... not tested ... not tested ...
h_make_block( dp1, dp2,
30, 16, // x/y
480, // frame size ... x * y
10, 8, 2 // block ... x/y/z
0, // no perm
5, 4, // overlap ... x/y
5, 3, // num of blocks in x/y
2, 3, // append
0, 0 ) ; // no weight, no shift
exit( 23 ) ;
#endif
#ifdef CUDA_OBS
// motion detection
k = h_do_motion_idx_v2 ( dp2, 1000 * 1000, &orig, 3, 3, cubecube,
2, 1, 2, &rec_size ) ;
if ( !k )
{
printf("motion failed") ;
exit( 1 ) ;
}
printf("orig idx is %d size %d\n", orig, rec_size ) ;
hvt_size = 5 * 3 * 2 ; // ( 2 * 2 + 1 ) * ( 1 * 2 + 1 ) * 2
dbg_p_d_data_i_mn ( "dp2", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * ( hvt_size * 3 * 3 ),
rec_size + NUM_OF_HVT_INDEX, hvt_size * 3 * 3, 6 ) ;
dbg_p_d_data_i ( "dp1 original", dp1, 30 ) ;
// step 0 : copy data ...
k = h_do_motion_detection_step0_v2 ( dp1, dp2,
rec_size * hvt_size * 3 * 3,
rec_size,
4, 2, 2,
dcubep,
hvt_size, cubecube[0].x * cubecube[0].y * cubecube[0].z ) ;
dbg_p_d_data_i ( "dp1 after", dp1, 30 ) ;
dbg_p_d_data_i_mn ( "motion 1", dp1, 12 * 14 * 3 * 9, 12, 14, 12 ) ;
dbg_p_d_data_i_mn ( "motion 2", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
// do step 1
for ( k = 0 ; k < CUBE_INFO_CNT ; k++ )
{
cubecube[k].x -= 4 ;
cubecube[k].y -= 2 ;
cubecube[k].z -= 1 ;
}
h_set_config ( dcubep, cubecube ) ;
h_do_l1_norm_step1_v2( dp2, rec_size * hvt_size * 3 * 3, rec_size, orig, hvt_size ) ;
dbg_p_d_data_i_mn ( "step 1", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
// step 2
// step 2 l1_norm ...
k = h_do_l1_norm_step2_v2( dp2, rec_size * hvt_size * 3 * 3,
rec_size, cubecube, dcubep) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "step 2", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
dbg_p_d_data_i ( "dcubep after step 2", ( int *)dcubep, 9 ) ;
// step 3
k = h_do_l1_norm_step3_v2( dp2, rec_size * hvt_size * 3 * 3, rec_size,
orig, hvt_size ) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "step 3", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
// step 4
k = h_do_l1_norm_step4_v2( dp2, rec_size * hvt_size * 3 * 3, rec_size,
orig, hvt_size, hp1 ) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "step 4", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
dbg_pdata_i( "return values", hp1, 3 * 3 * sizeof ( int )) ;
exit( 23 ) ;
#endif
#ifdef CUDA_OBS
// edge v2 test
{
int edge_x = 1, edge_y = 2, blk_in_x = 4, blk_in_y = 3 ;
i = blk_in_x * blk_in_y * (hcube[0].x * hcube[0].y * hcube[0].z) ;
h_do_edge_detection_v2 ( dp1, dp2, i,
dcubep, edge_x, edge_y, blk_in_x, blk_in_y,
( struct cube * )&cubecube[0] ) ;
dbg_p_d_data_i_mn_v2 ( "edge_v2 orig", dp1, i, hcube[0].x, hcube,
blk_in_x, blk_in_y ) ;
dbg_p_d_data_i_mn_v2 ( "edge_v2 get", dp2, i, hcube[0].x, hcube,
blk_in_x, blk_in_y ) ;
i = h_do_copy_box_v2 ( dp2, dp1, i,
edge_x, edge_y, blk_in_x, blk_in_y, dcubep, cubecube ) ;
dbg_p_d_data_i_mn ( "after copy", dp1, ( hcube[0].x - edge_x * 2 ) *
( hcube[0].y - edge_y * 2 ) * hcube[0].z * blk_in_x * blk_in_y,
hcube[0].x - edge_x * 2, hcube[0].y - edge_y * 2,
hcube[0].x - edge_x * 2) ;
}
#endif
#ifdef CUDA_OBS
// do step 1
dbg_p_d_data_i_mn ( "l1", dp1, ( 10 * 8 ), 10, 8, 10 ) ;
h_do_l1_norm_step1( dp1, 100, 7, 4 ) ;
dbg_p_d_data_i_mn ( "l1 done", dp1, ( 10 * 8 ), 10, 8, 10 ) ;
#endif
#ifdef CUDA_OBS
// step 3 l1-norm
dbg_p_d_data_i_mn ( "l3 before", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
k = h_do_l1_norm_step3( dp1, 7 * 8, 8, 3) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "l3 done", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
// step 4 find min
dbg_p_d_data_i_mn ( "l4 before", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
k = h_do_l1_norm_step4( dp1, 8 * 7, 8 , 3, a ) ;
printf("k is %d -- %d %d %d %d \n", k, a[0], a[1], a[2], a[3] ) ;
dbg_p_d_data_i_mn ( "l4 done", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
#endif
#ifdef CUDA_OBS
// step 2 l1_norm ...
dbg_p_d_data_i_mn ( "l2 before", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
k = h_do_l1_norm_step2( dp1, 7 * 8, 8) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "l2 done", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
#endif
#ifdef CUDA_OBS
//abs() test
dbg_p_d_data_i_mn ( "abs", dp1, ( 13 * 8 ), 13, 8, 13 ) ;
k = h_set_abs ( dp1, 100, 10, 3 ) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "abs done", dp1, ( 13 * 8 ), 13, 8, 13 ) ;
#endif
#ifdef CUDA_OBS
// motion detection
k = h_do_motion_idx ( dp2, 100 * 100, 72,
3, 3, 3, &i ) ;
if ( !k )
{
printf("motion failed") ;
exit( 1 ) ;
}
printf("orig idx is %d\n", i ) ;
dbg_p_d_data_i_mn ( "dp1", dp1, (6*8*5), 6, 8, 6 ) ;
k = h_do_motion_detection ( dp1, dp2,
72 * 27,
75, // 4 * 6 * 3
6, 48,
4, 24 ) ;
dbg_p_d_data_i_mn ( "motion", dp2, ( 75 * 28 ), 75, 28, 75 ) ;
#endif
#ifdef CUDA_OBS
k = h_do_copy_box ( dp1, dp2, 96, 8,6,1,1 ) ;
if ( !k )
{
printf("copy failed") ;
exit( 1 ) ;
}
dbg_p_d_data_i_mn ( "copy before", dp1, 128, 8, 6, 8 ) ;
dbg_p_d_data_i_mn ( "copy done", dp2, 100, 6, 4, 6 ) ;
#endif
}
| 175f7c49bbf8a69b2d7a3539d7b65835e4970705.cu | #include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_helper.h"
#include "cs_analysis.h"
#include "cs_header.h"
#include "cs_block.h"
#include "cs_perm_mlseq.h"
#include "cs_expand.h"
#include "cs_interpolate.h"
#include "cs_perm_selection.h"
#include "cs_copy_box.h"
#include "cs_motion_detect.h"
#include "cs_motion_detect_v2.h"
// #include "cs_edge_detect.h"
#include "cs_edge_detect_v2.h"
#include "cs_ipcam.h"
#define CUDA_DBG
int *dp1 = NULL, *dp2 = NULL ;
int *hp1 = NULL, *hp2 = NULL ;
#define NUM_OF_HVT_INDEX 3
#define BUF_SIZE ( 1024 * 1024 )
#define BUF_SIZE_INT ( BUF_SIZE * sizeof (int))
struct cs_xyz hcube[ CUBE_INFO_CNT ], *dcubep ;
struct cube cubecube[ CUBE_INFO_CNT ] ;
int
main( int ac, char *av[] )
{
#ifdef CUDA_OBS
int orig, rec_size, hvt_size;
#endif
int k, i, *dp ;
setbuf( stdout, NULL ) ;
setbuf( stderr, NULL ) ;
if (( k = cudaMalloc( &dcubep, sizeof ( cube ))) != cudaSuccess )
{
printf("%s: d_cube alloc failed %d \n", __func__, k ) ;
exit ( 0 ) ;
}
if (( k = cudaMalloc( &dp1, BUF_SIZE_INT )) != cudaSuccess )
{
printf("%s: cube alloc failed %d \n", __func__, k ) ;
exit ( 0 ) ;
}
if (( k = cudaMalloc( &dp2, BUF_SIZE_INT )) != cudaSuccess )
{
printf("%s: cube alloc failed %d \n", __func__, k ) ;
exit ( 0 ) ;
}
#ifdef CUDA_OBS
// testing of the ipcam stuff
ipcam_init ( 5, 640, 480 ) ;
exit(2) ;
#endif
dbg_init( 1024 * 1024 ) ;
cubecube[0].x = hcube[0].x = 12 ;
cubecube[0].y = hcube[0].y = 14 ;
cubecube[0].z = hcube[0].z = 3 ;
cubecube[1].x = hcube[1].x = 10 ;
cubecube[1].y = hcube[1].y = 8 ;
cubecube[1].z = hcube[1].z = 3 ;
cubecube[2].x = hcube[2].x = 8 ;
cubecube[2].y = hcube[2].y = 6 ;
cubecube[2].z = hcube[2].z = 4 ;
h_set_config( dcubep, cubecube ) ;
#ifdef CUDA_OBS
dbg_p_d_data_i ( "cube 1", ( int *)dcubep, sizeof( hcube ) / sizeof ( int)) ;
#endif
hp1 = ( int * )malloc ( BUF_SIZE_INT ) ;
hp2 = ( int * )malloc ( BUF_SIZE_INT ) ;
dp = hp1 ;
for ( i = 0 ; i < BUF_SIZE ; i++ )
{
*dp++ = rand() & 0xff ;
#ifdef CUDA_OBS
*dp++ = i ;
*dp++ = rand() & 0xff ;
if ( i & 1 )
*dp++ = i+100 ;
else
*dp++ = i-100 ;
#endif
}
set_device_mem_i( dp2, BUF_SIZE, 111 ) ;
if (( i = cudaMemcpy( dp1, hp1, BUF_SIZE_INT,
cudaMemcpyHostToDevice)) != cudaSuccess )
{
printf("%s: cp failed %d\n", __func__, i ) ;
exit( 0 ) ;
}
#ifdef CUDA_DBG
dbg_p_d_data_i ( "dp1 original", dp1, 25 ) ;
#endif
#ifdef CUDA_OBS
// test of dbg_p_d_data_i_mn_v2
dbg_p_d_data_i_mn_v2( "dp1 init", dp1, 12 * 14 * 3 * 3 * 3, 12,
hcube, 3 ,3 ) ;
#endif
// do the test here ..
#ifdef CUDA_OBS
// blocking
// not tested ... not tested ... not tested ...
h_make_block( dp1, dp2,
30, 16, // x/y
480, // frame size ... x * y
10, 8, 2, // block ... x/y/z
0, // no perm
5, 4, // overlap ... x/y
5, 3, // num of blocks in x/y
2, 3, // append
0, 0 ) ; // no weight, no shift
exit( 23 ) ;
#endif
#ifdef CUDA_OBS
// motion detection
k = h_do_motion_idx_v2 ( dp2, 1000 * 1000, &orig, 3, 3, cubecube,
2, 1, 2, &rec_size ) ;
if ( !k )
{
printf("motion failed") ;
exit( 1 ) ;
}
printf("orig idx is %d size %d\n", orig, rec_size ) ;
hvt_size = 5 * 3 * 2 ; // ( 2 * 2 + 1 ) * ( 1 * 2 + 1 ) * 2
dbg_p_d_data_i_mn ( "dp2", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * ( hvt_size * 3 * 3 ),
rec_size + NUM_OF_HVT_INDEX, hvt_size * 3 * 3, 6 ) ;
dbg_p_d_data_i ( "dp1 original", dp1, 30 ) ;
// step 0 : copy data ...
k = h_do_motion_detection_step0_v2 ( dp1, dp2,
rec_size * hvt_size * 3 * 3,
rec_size,
4, 2, 2,
dcubep,
hvt_size, cubecube[0].x * cubecube[0].y * cubecube[0].z ) ;
dbg_p_d_data_i ( "dp1 after", dp1, 30 ) ;
dbg_p_d_data_i_mn ( "motion 1", dp1, 12 * 14 * 3 * 9, 12, 14, 12 ) ;
dbg_p_d_data_i_mn ( "motion 2", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
// do step 1
for ( k = 0 ; k < CUBE_INFO_CNT ; k++ )
{
cubecube[k].x -= 4 ;
cubecube[k].y -= 2 ;
cubecube[k].z -= 1 ;
}
h_set_config ( dcubep, cubecube ) ;
h_do_l1_norm_step1_v2( dp2, rec_size * hvt_size * 3 * 3, rec_size, orig, hvt_size ) ;
dbg_p_d_data_i_mn ( "step 1", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
// step 2
// step 2 l1_norm ...
k = h_do_l1_norm_step2_v2( dp2, rec_size * hvt_size * 3 * 3,
rec_size, cubecube, dcubep) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "step 2", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
dbg_p_d_data_i ( "dcubep after step 2", ( int *)dcubep, 9 ) ;
// step 3
k = h_do_l1_norm_step3_v2( dp2, rec_size * hvt_size * 3 * 3, rec_size,
orig, hvt_size ) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "step 3", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
// step 4
k = h_do_l1_norm_step4_v2( dp2, rec_size * hvt_size * 3 * 3, rec_size,
orig, hvt_size, hp1 ) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "step 4", dp2, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * 3 * 3,
rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ;
dbg_pdata_i( "return values", hp1, 3 * 3 * sizeof ( int )) ;
exit( 23 ) ;
#endif
#ifdef CUDA_OBS
// edge v2 test
{
int edge_x = 1, edge_y = 2, blk_in_x = 4, blk_in_y = 3 ;
i = blk_in_x * blk_in_y * (hcube[0].x * hcube[0].y * hcube[0].z) ;
h_do_edge_detection_v2 ( dp1, dp2, i,
dcubep, edge_x, edge_y, blk_in_x, blk_in_y,
( struct cube * )&cubecube[0] ) ;
dbg_p_d_data_i_mn_v2 ( "edge_v2 orig", dp1, i, hcube[0].x, hcube,
blk_in_x, blk_in_y ) ;
dbg_p_d_data_i_mn_v2 ( "edge_v2 get", dp2, i, hcube[0].x, hcube,
blk_in_x, blk_in_y ) ;
i = h_do_copy_box_v2 ( dp2, dp1, i,
edge_x, edge_y, blk_in_x, blk_in_y, dcubep, cubecube ) ;
dbg_p_d_data_i_mn ( "after copy", dp1, ( hcube[0].x - edge_x * 2 ) *
( hcube[0].y - edge_y * 2 ) * hcube[0].z * blk_in_x * blk_in_y,
hcube[0].x - edge_x * 2, hcube[0].y - edge_y * 2,
hcube[0].x - edge_x * 2) ;
}
#endif
#ifdef CUDA_OBS
// do step 1
dbg_p_d_data_i_mn ( "l1", dp1, ( 10 * 8 ), 10, 8, 10 ) ;
h_do_l1_norm_step1( dp1, 100, 7, 4 ) ;
dbg_p_d_data_i_mn ( "l1 done", dp1, ( 10 * 8 ), 10, 8, 10 ) ;
#endif
#ifdef CUDA_OBS
// step 3 l1-norm
dbg_p_d_data_i_mn ( "l3 before", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
k = h_do_l1_norm_step3( dp1, 7 * 8, 8, 3) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "l3 done", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
// step 4 find min
dbg_p_d_data_i_mn ( "l4 before", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
k = h_do_l1_norm_step4( dp1, 8 * 7, 8 , 3, a ) ;
printf("k is %d -- %d %d %d %d \n", k, a[0], a[1], a[2], a[3] ) ;
dbg_p_d_data_i_mn ( "l4 done", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
#endif
#ifdef CUDA_OBS
// step 2 l1_norm ...
dbg_p_d_data_i_mn ( "l2 before", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
k = h_do_l1_norm_step2( dp1, 7 * 8, 8) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "l2 done", dp1, ( 11 * 7 ), 11, 7, 11 ) ;
#endif
#ifdef CUDA_OBS
//abs() test
dbg_p_d_data_i_mn ( "abs", dp1, ( 13 * 8 ), 13, 8, 13 ) ;
k = h_set_abs ( dp1, 100, 10, 3 ) ;
printf("k is %d\n", k ) ;
dbg_p_d_data_i_mn ( "abs done", dp1, ( 13 * 8 ), 13, 8, 13 ) ;
#endif
#ifdef CUDA_OBS
// motion detection
k = h_do_motion_idx ( dp2, 100 * 100, 72,
3, 3, 3, &i ) ;
if ( !k )
{
printf("motion failed") ;
exit( 1 ) ;
}
printf("orig idx is %d\n", i ) ;
dbg_p_d_data_i_mn ( "dp1", dp1, (6*8*5), 6, 8, 6 ) ;
k = h_do_motion_detection ( dp1, dp2,
72 * 27,
75, // 4 * 6 * 3
6, 48,
4, 24 ) ;
dbg_p_d_data_i_mn ( "motion", dp2, ( 75 * 28 ), 75, 28, 75 ) ;
#endif
#ifdef CUDA_OBS
k = h_do_copy_box ( dp1, dp2, 96, 8,6,1,1 ) ;
if ( !k )
{
printf("copy failed") ;
exit( 1 ) ;
}
dbg_p_d_data_i_mn ( "copy before", dp1, 128, 8, 6, 8 ) ;
dbg_p_d_data_i_mn ( "copy done", dp2, 100, 6, 4, 6 ) ;
#endif
}
|
956eb475b5a4e5bdd5e42a89dbfe9649a0a35cf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2015 by Contributors
* \file pad.cu
* \brief
* \author Sebastian Bodenstein
*/
#include "./pad-inl.h"
#include "../common/cuda_utils.h"
namespace mshadow {
namespace cuda {
////////////////////////////////////////////////////////////////////////////////
// Special Case: 2d image (so only pad width + height)
// Case 1: Replication Padding
// single_image_2d_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReplicationPadding.cu
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX =
min(max(padL, outputPointX), src.size(3) + padL - 1) - oStartX + iStartX;
int inputPointY =
min(max(padT, outputPointY), src.size(2) + padT - 1) - oStartY + iStartY;
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
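// replication ("edge") padding: the output coordinate is clamped into the valid input
// range, so pixels outside the source simply repeat the nearest border pixel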
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( image_2d_pad_edge_kernel<kBaseThreadBits,
DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, dst, src,
padT, padL);
}
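// launch layout used throughout this file: the x-dimension walks the output pixels in
// 256-thread (kBaseThreadNum) chunks, grid.y indexes the channel (plane), grid.z the batch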
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
return;
}
int outputPointX = outputPointId % grad_out.size(3);
int outputPointY = outputPointId / grad_out.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = min(max(padL, outputPointX), grad_in.size(3) + padL - 1) -
oStartX + iStartX;
int inputPointY = min(max(padT, outputPointY), grad_in.size(2) + padT - 1) -
oStartY + iStartY;
DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);
}
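// several padded output pixels can clamp to the same input pixel, so the gradient has to
// be accumulated with atomicAdd rather than a plain store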
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
hipLaunchKernelGGL(( image_2d_pad_edge_grad_kernel<kBaseThreadBits,
DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
grad_in, grad_out, padT, padL);
}
// Case 2: Constant Padding
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL,
const DType constant) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
// cast sizes to int to use in min/max
int Ny = src.size(2);
int Nx = src.size(3);
int plane = blockIdx.y;
int batch = blockIdx.z;
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int checkT = max(0, outputPointY - padT + 1);
int checkB = max(0, padT + Ny - outputPointY);
int checkL = max(0, outputPointX - padL + 1);
int checkR = max(0, padL + Nx - outputPointX);
int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
// 1 if need padding, 0 if not
int need_pad = !(checkT * checkB * checkL * checkR);
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] =
valueToCopy * (!need_pad) + need_pad * constant;
}
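// branch-free write: need_pad is 0 or 1, so each thread selects either the copied source
// value or the padding constant without diverging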
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad, const DType constant) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( image_2d_pad_constant_kernel<kBaseThreadBits,
DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
dst, src, padT, padL, constant);
}
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int inPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
int pixel_num = grad_in.size(2) * grad_in.size(3);
if (inPointId >= pixel_num) {
return;
}
int inPointX = inPointId % grad_in.size(3);
int inPointY = inPointId / grad_in.size(3);
int outPointX = inPointX + padL;
int outPointY = inPointY + padT;
grad_in[batch][plane][inPointY][inPointX] =
grad_out[batch][plane][outPointY][outPointX];
}
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_in.size(2) * grad_in.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
hipStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
hipLaunchKernelGGL(( image_2d_pad_constant_grad_kernel<kBaseThreadBits,
DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
grad_in, grad_out, padT, padL);
}
////////////////////////////////////////////////////////////////////////////////
// Special Case: 3d image (pad depth + width + height)
// Case 1: Replication Padding
// single_image_3_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/VolumetricReplicationPadding.cu
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
int outputPointX = outputPointId % dst.size(4);
int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
int inputPointX =
min(max(padL, outputPointX), src.size(4) + padL - 1) - oStartX + iStartX;
int inputPointY =
min(max(padT, outputPointY), src.size(3) + padT - 1) - oStartY + iStartY;
int inputPointZ =
min(max(padF, outputPointZ), src.size(2) + padF - 1) - oStartZ + iStartZ;
DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( image_3d_pad_edge_kernel<kBaseThreadBits,
DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
dst, src, padF, padT, padL);
}
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) {
return;
}
int outputPointX = outputPointId % grad_out.size(4);
int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3);
int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4));
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
int inputPointX = min(max(padL, outputPointX), grad_in.size(4) + padL - 1) -
oStartX + iStartX;
int inputPointY = min(max(padT, outputPointY), grad_in.size(3) + padT - 1) -
oStartY + iStartY;
int inputPointZ = min(max(padF, outputPointZ), grad_in.size(2) + padF - 1) -
oStartZ + iStartZ;
DType valueToCopy =
grad_out[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize =
(grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
hipLaunchKernelGGL(( image_3d_pad_edge_grad_kernel<kBaseThreadBits,
DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
grad_in, grad_out, padF, padT, padL);
}
// Case 2: Constant Padding
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL,
const DType constant) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
// cast sizes to int to use in min/max
int Nz = src.size(2);
int Ny = src.size(3);
int Nx = src.size(4);
int plane = blockIdx.y;
int batch = blockIdx.z;
int outputPointX = outputPointId % dst.size(4);
int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
int checkFront = max(0, outputPointZ - padF + 1);
int checkBack = max(0, padF + Nz - outputPointZ);
int checkTop = max(0, outputPointY - padT + 1);
int checkBottom = max(0, padT + Ny - outputPointY);
int checkLeft = max(0, outputPointX - padL + 1);
int checkRight = max(0, padL + Nx - outputPointX);
int inputPointZ = min(max(outputPointZ - padF, 0), Nz - 1);
int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
// 1 if need padding, 0 if not
int need_pad = !(checkFront * checkBack * checkTop * checkBottom * checkLeft *
checkRight);
DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
dst[batch][plane][outputPointZ][outputPointY][outputPointX] =
valueToCopy * (!need_pad) + need_pad * constant;
}
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad, const DType constant) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
hipLaunchKernelGGL(( image_3d_pad_constant_kernel<kBaseThreadBits,
DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
dst, src, padF, padT, padL, constant);
}
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
int inPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
int pixel_num = grad_in.size(2) * grad_in.size(3) * grad_in.size(4);
if (inPointId >= pixel_num) {
return;
}
int inPointX = inPointId % grad_in.size(4);
int inPointY = (inPointId / grad_in.size(4)) % grad_in.size(3);
int inPointZ = inPointId / (grad_in.size(3) * grad_in.size(4));
int outPointZ = inPointZ + padF;
int outPointX = inPointX + padL;
int outPointY = inPointY + padT;
grad_in[batch][plane][inPointZ][inPointY][inPointX] =
grad_out[batch][plane][outPointZ][outPointY][outPointX];
}
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize =
(grad_in.size(2) * grad_in.size(3) * grad_in.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
hipStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
hipLaunchKernelGGL(( image_3d_pad_constant_grad_kernel<kBaseThreadBits,
DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
grad_in, grad_out, padF, padT, padL);
}
////////////////////////////////////////////////////////////////////////////////
} // namespace cuda
template <int dim, typename DType>
void pad_image(Tensor<gpu, dim, DType> dst, const Tensor<gpu, dim, DType> src,
const mxnet::TShape pad, int mode, const DType constant_value) {
switch (mode) {
case mxnet::op::pad_enum::kEdge:
cuda::image_pad_edge(dst, src, pad);
break;
case mxnet::op::pad_enum::kConstant:
cuda::image_pad_constant(dst, src, pad, constant_value);
break;
}
}
template <int dim, typename DType>
void pad_image_grad(Tensor<gpu, dim, DType> grad_in,
const Tensor<gpu, dim, DType> grad_out,
const mxnet::TShape pad, int mode) {
switch (mode) {
case mxnet::op::pad_enum::kEdge:
cuda::image_pad_edge_grad(grad_in, grad_out, pad);
break;
case mxnet::op::pad_enum::kConstant:
cuda::image_pad_constant_grad(grad_in, grad_out, pad);
break;
}
}
} // namespace mshadow
////////////////////////////////////////////////////////////////////////////////
namespace mxnet {
namespace op {
template <>
Operator *CreateOp<gpu>(PadParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PadOp<gpu, DType>(param); })
return op;
}
} // namespace op
} // namespace mxnet
| 956eb475b5a4e5bdd5e42a89dbfe9649a0a35cf7.cu | /*!
* Copyright (c) 2015 by Contributors
* \file pad.cu
* \brief
* \author Sebastian Bodenstein
*/
#include "./pad-inl.h"
#include "../common/cuda_utils.h"
namespace mshadow {
namespace cuda {
////////////////////////////////////////////////////////////////////////////////
// Special Case: 2d image (so only pad width + height)
// Case 1: Replication Padding
// single_image_2d_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReplicationPadding.cu
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX =
min(max(padL, outputPointX), src.size(3) + padL - 1) - oStartX + iStartX;
int inputPointY =
min(max(padT, outputPointY), src.size(2) + padT - 1) - oStartY + iStartY;
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_2d_pad_edge_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(dst, src,
padT, padL);
}
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
return;
}
int outputPointX = outputPointId % grad_out.size(3);
int outputPointY = outputPointId / grad_out.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = min(max(padL, outputPointX), grad_in.size(3) + padL - 1) -
oStartX + iStartX;
int inputPointY = min(max(padT, outputPointY), grad_in.size(2) + padT - 1) -
oStartY + iStartY;
DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);
}
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_2d_pad_edge_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padT, padL);
}
// Case 2: Constant Padding
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL,
const DType constant) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
// cast sizes to int to use in min/max
int Ny = src.size(2);
int Nx = src.size(3);
int plane = blockIdx.y;
int batch = blockIdx.z;
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int checkT = max(0, outputPointY - padT + 1);
int checkB = max(0, padT + Ny - outputPointY);
int checkL = max(0, outputPointX - padL + 1);
int checkR = max(0, padL + Nx - outputPointX);
int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
// 1 if need padding, 0 if not
int need_pad = !(checkT * checkB * checkL * checkR);
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] =
valueToCopy * (!need_pad) + need_pad * constant;
}
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad, const DType constant) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_2d_pad_constant_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
dst, src, padT, padL, constant);
}
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int inPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
int pixel_num = grad_in.size(2) * grad_in.size(3);
if (inPointId >= pixel_num) {
return;
}
int inPointX = inPointId % grad_in.size(3);
int inPointY = inPointId / grad_in.size(3);
int outPointX = inPointX + padL;
int outPointY = inPointY + padT;
grad_in[batch][plane][inPointY][inPointX] =
grad_out[batch][plane][outPointY][outPointX];
}
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_in.size(2) * grad_in.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
image_2d_pad_constant_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padT, padL);
}
////////////////////////////////////////////////////////////////////////////////
// Special Case: 3d image (pad depth + width + height)
// Case 1: Replication Padding
// single_image_3_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/VolumetricReplicationPadding.cu
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
int outputPointX = outputPointId % dst.size(4);
int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
int inputPointX =
min(max(padL, outputPointX), src.size(4) + padL - 1) - oStartX + iStartX;
int inputPointY =
min(max(padT, outputPointY), src.size(3) + padT - 1) - oStartY + iStartY;
int inputPointZ =
min(max(padF, outputPointZ), src.size(2) + padF - 1) - oStartZ + iStartZ;
DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
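// same clamping scheme as the 2-D case, extended with a front/back pad (padF) along the
// depth axis of the 5-D (batch, channel, depth, height, width) tensor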
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_3d_pad_edge_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
dst, src, padF, padT, padL);
}
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) {
return;
}
int outputPointX = outputPointId % grad_out.size(4);
int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3);
int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4));
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
int inputPointX = min(max(padL, outputPointX), grad_in.size(4) + padL - 1) -
oStartX + iStartX;
int inputPointY = min(max(padT, outputPointY), grad_in.size(3) + padT - 1) -
oStartY + iStartY;
int inputPointZ = min(max(padF, outputPointZ), grad_in.size(2) + padF - 1) -
oStartZ + iStartZ;
DType valueToCopy =
grad_out[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize =
(grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_3d_pad_edge_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padF, padT, padL);
}
// Case 2: Constant Padding
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL,
const DType constant) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
// cast sizes to int to use in min/max
int Nz = src.size(2);
int Ny = src.size(3);
int Nx = src.size(4);
int plane = blockIdx.y;
int batch = blockIdx.z;
int outputPointX = outputPointId % dst.size(4);
int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
int checkFront = max(0, outputPointZ - padF + 1);
int checkBack = max(0, padF + Nz - outputPointZ);
int checkTop = max(0, outputPointY - padT + 1);
int checkBottom = max(0, padT + Ny - outputPointY);
int checkLeft = max(0, outputPointX - padL + 1);
int checkRight = max(0, padL + Nx - outputPointX);
int inputPointZ = min(max(outputPointZ - padF, 0), Nz - 1);
int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
// 1 if need padding, 0 if not
int need_pad = !(checkFront * checkBack * checkTop * checkBottom * checkLeft *
checkRight);
DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
dst[batch][plane][outputPointZ][outputPointY][outputPointX] =
valueToCopy * (!need_pad) + need_pad * constant;
}
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad, const DType constant) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_3d_pad_constant_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
dst, src, padF, padT, padL, constant);
}
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
int inPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
int pixel_num = grad_in.size(2) * grad_in.size(3) * grad_in.size(4);
if (inPointId >= pixel_num) {
return;
}
int inPointX = inPointId % grad_in.size(4);
int inPointY = (inPointId / grad_in.size(4)) % grad_in.size(3);
int inPointZ = inPointId / (grad_in.size(3) * grad_in.size(4));
int outPointZ = inPointZ + padF;
int outPointX = inPointX + padL;
int outPointY = inPointY + padT;
grad_in[batch][plane][inPointZ][inPointY][inPointX] =
grad_out[batch][plane][outPointZ][outPointY][outPointX];
}
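// constant padding contributes nothing to the gradient, so the backward pass simply copies
// the interior of grad_out back, shifted by the front/top/left pad offsets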
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize =
(grad_in.size(2) * grad_in.size(3) * grad_in.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
image_3d_pad_constant_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padF, padT, padL);
}
////////////////////////////////////////////////////////////////////////////////
} // namespace cuda
template <int dim, typename DType>
void pad_image(Tensor<gpu, dim, DType> dst, const Tensor<gpu, dim, DType> src,
const mxnet::TShape pad, int mode, const DType constant_value) {
switch (mode) {
case mxnet::op::pad_enum::kEdge:
cuda::image_pad_edge(dst, src, pad);
break;
case mxnet::op::pad_enum::kConstant:
cuda::image_pad_constant(dst, src, pad, constant_value);
break;
}
}
template <int dim, typename DType>
void pad_image_grad(Tensor<gpu, dim, DType> grad_in,
const Tensor<gpu, dim, DType> grad_out,
const mxnet::TShape pad, int mode) {
switch (mode) {
case mxnet::op::pad_enum::kEdge:
cuda::image_pad_edge_grad(grad_in, grad_out, pad);
break;
case mxnet::op::pad_enum::kConstant:
cuda::image_pad_constant_grad(grad_in, grad_out, pad);
break;
}
}
} // namespace mshadow
////////////////////////////////////////////////////////////////////////////////
namespace mxnet {
namespace op {
template <>
Operator *CreateOp<gpu>(PadParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PadOp<gpu, DType>(param); })
return op;
}
} // namespace op
} // namespace mxnet
|
a42ca7d2a680ccfd3a8d5744d73457aa8a276ccf.hip | // !!! This is a file automatically generated by hipify!!!
/**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define POLYBENCH_TIME 1
#include "../../../../utilities/remapping.h"
#include "../../../../utilities/remapping_mode.h"
#include "3mm.cuh"
#include <polybench.h>
#include <polybenchUtilFuncts.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define RUN_ON_CPU
void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk), DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj),
DATA_TYPE POLYBENCH_2D(C, NJ, NM, nj, nm), DATA_TYPE POLYBENCH_2D(D, NM, NL, nm, nl))
{
int i, j;
for (i = 0; i < ni; i++)
{
for (j = 0; j < nk; j++)
{
A[i][j] = ((DATA_TYPE) i*j) / ni;
}
}
for (i = 0; i < nk; i++)
{
for (j = 0; j < nj; j++)
{
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
}
}
for (i = 0; i < nj; i++)
{
for (j = 0; j < nm; j++)
{
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
}
}
for (i = 0; i < nm; i++)
{
for (j = 0; j < nl; j++)
{
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
}
}
void compareResults(int ni, int nl, DATA_TYPE POLYBENCH_2D(G, NI, NL, ni, nl), DATA_TYPE POLYBENCH_2D(G_outputFromGpu, NI, NL, ni, nl))
{
int i,j,fail;
fail = 0;
for (i=0; i < ni; i++)
{
for (j=0; j < nl; j++)
{
if (percentDiff(G[i][j], G_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void mm3_kernel1(int ni, int nj, int nk, int nl, int nm, DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E)
{
int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE);
if ((i < _PB_NI) && (j < _PB_NJ))
{
E[i * NJ + j] = 0;
int k;
for(k=0; k < _PB_NK; k++)
{
E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm3_kernel2(int ni, int nj, int nk, int nl, int nm, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F)
{
int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE);
if ((i < _PB_NJ) && (j < _PB_NL))
{
F[i * NL + j] = 0;
int k;
for(k=0; k < _PB_NM; k++)
{
F[i * NL + j] += C[i * NM + k] * D[k * NL +j];
}
}
}
__global__ void mm3_kernel3(int ni, int nj, int nk, int nl, int nm, DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G)
{
int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE);
if ((i < _PB_NI) && (j < _PB_NL))
{
G[i * NL + j] = 0;
int k;
for(k=0; k < _PB_NJ; k++)
{
G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
}
}
}
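// each kernel maps one thread to one output element: (i, j) come from the (optionally
// remapped) block/thread indices and the inner loop is a plain dot product over k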
/* Main computational kernel on CPU */
void mm3_cpu(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
/* E := A*B */
for (i = 0; i < _PB_NI; i++)
{
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
{
E[i][j] += A[i][k] * B[k][j];
}
}
}
/* F := C*D */
for (i = 0; i < _PB_NJ; i++)
{
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
{
F[i][j] += C[i][k] * D[k][j];
}
}
}
/* G := E*F */
for (i = 0; i < _PB_NI; i++)
{
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
{
G[i][j] += E[i][k] * F[k][j];
}
}
}
}
void mm3Cuda(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl),
DATA_TYPE POLYBENCH_2D(G_outputFromGpu,NI,NL,ni,nl))
{
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
DATA_TYPE *D_gpu;
DATA_TYPE *E_gpu;
DATA_TYPE *F_gpu;
DATA_TYPE *G_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ);
hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NJ * NM);
hipMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NM * NL);
hipMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NJ);
hipMalloc((void **)&F_gpu, sizeof(DATA_TYPE) * NJ * NL);
hipMalloc((void **)&G_gpu, sizeof(DATA_TYPE) * NI * NL);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, hipMemcpyHostToDevice);
hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, hipMemcpyHostToDevice);
hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NJ * NM, hipMemcpyHostToDevice);
hipMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NM * NL, hipMemcpyHostToDevice);
hipMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice);
hipMemcpy(F_gpu, F, sizeof(DATA_TYPE) * NJ * NL, hipMemcpyHostToDevice);
hipMemcpy(G_gpu, G, sizeof(DATA_TYPE) * NI * NL, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
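// grid1/grid2/grid3 match the three output shapes (E: NI x NJ, F: NJ x NL, G: NI x NL);
// G depends on E and F, and the hipDeviceSynchronize() calls make that ordering explicit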
/* Start timer. */
polybench_start_instruments;
hipLaunchKernelGGL(( mm3_kernel1), dim3(grid1),dim3(block), 0, 0, ni, nj, nk, nl, nm, A_gpu, B_gpu, E_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( mm3_kernel2), dim3(grid2),dim3(block), 0, 0, ni, nj, nk, nl, nm, C_gpu, D_gpu, F_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( mm3_kernel3), dim3(grid3),dim3(block), 0, 0, ni, nj, nk, nl, nm, E_gpu, F_gpu, G_gpu);
hipDeviceSynchronize();
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
hipMemcpy(G_outputFromGpu, G_gpu, sizeof(DATA_TYPE) * NI * NL, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(B_gpu);
hipFree(C_gpu);
hipFree(D_gpu);
hipFree(E_gpu);
hipFree(F_gpu);
hipFree(G_gpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
int main(int argc, char** argv)
{
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
POLYBENCH_2D_ARRAY_DECL(G_outputFromGpu, DATA_TYPE, NI, NL, ni, nl);
init_array(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D));
GPU_argv_init();
// pass the arrays in the order the prototypes declare them: E, A, B, F, C, D, G(, G_outputFromGpu)
mm3Cuda(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G), POLYBENCH_ARRAY(G_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
mm3_cpu(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(ni, nl, POLYBENCH_ARRAY(G), POLYBENCH_ARRAY(G_outputFromGpu));
#else //prevent dead code elimination
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G_outputFromGpu)));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(G);
POLYBENCH_FREE_ARRAY(G_outputFromGpu);
return 0;
}
#include <polybench.c>
| a42ca7d2a680ccfd3a8d5744d73457aa8a276ccf.cu | /**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#define POLYBENCH_TIME 1
#include "../../../../utilities/remapping.h"
#include "../../../../utilities/remapping_mode.h"
#include "3mm.cuh"
#include <polybench.h>
#include <polybenchUtilFuncts.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define RUN_ON_CPU
void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk), DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj),
DATA_TYPE POLYBENCH_2D(C, NJ, NM, nj, nm), DATA_TYPE POLYBENCH_2D(D, NM, NL, nm, nl))
{
int i, j;
for (i = 0; i < ni; i++)
{
for (j = 0; j < nk; j++)
{
A[i][j] = ((DATA_TYPE) i*j) / ni;
}
}
for (i = 0; i < nk; i++)
{
for (j = 0; j < nj; j++)
{
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
}
}
for (i = 0; i < nj; i++)
{
for (j = 0; j < nm; j++)
{
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
}
}
for (i = 0; i < nm; i++)
{
for (j = 0; j < nl; j++)
{
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
}
}
void compareResults(int ni, int nl, DATA_TYPE POLYBENCH_2D(G, NI, NL, ni, nl), DATA_TYPE POLYBENCH_2D(G_outputFromGpu, NI, NL, ni, nl))
{
int i,j,fail;
fail = 0;
for (i=0; i < ni; i++)
{
for (j=0; j < nl; j++)
{
if (percentDiff(G[i][j], G_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
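/* E := A*B */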
__global__ void mm3_kernel1(int ni, int nj, int nk, int nl, int nm, DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E)
{
int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE);
if ((i < _PB_NI) && (j < _PB_NJ))
{
E[i * NJ + j] = 0;
int k;
for(k=0; k < _PB_NK; k++)
{
E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
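/* F := C*D */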
__global__ void mm3_kernel2(int ni, int nj, int nk, int nl, int nm, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F)
{
int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE);
if ((i < _PB_NJ) && (j < _PB_NL))
{
F[i * NL + j] = 0;
int k;
for(k=0; k < _PB_NM; k++)
{
F[i * NL + j] += C[i * NM + k] * D[k * NL +j];
}
}
}
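/* G := E*F */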
__global__ void mm3_kernel3(int ni, int nj, int nk, int nl, int nm, DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G)
{
int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
int i = remappingBlockIDy(blockIdx.y, BLOCKYMODE) * blockDim.y + remappingThreadIDy(threadIdx.y, THREADYMODE);
if ((i < _PB_NI) && (j < _PB_NL))
{
G[i * NL + j] = 0;
int k;
for(k=0; k < _PB_NJ; k++)
{
G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
}
}
}
/* Main computational kernel on CPU */
void mm3_cpu(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
/* E := A*B */
for (i = 0; i < _PB_NI; i++)
{
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
{
E[i][j] += A[i][k] * B[k][j];
}
}
}
/* F := C*D */
for (i = 0; i < _PB_NJ; i++)
{
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
{
F[i][j] += C[i][k] * D[k][j];
}
}
}
/* G := E*F */
for (i = 0; i < _PB_NI; i++)
{
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
{
G[i][j] += E[i][k] * F[k][j];
}
}
}
}
void mm3Cuda(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl),
DATA_TYPE POLYBENCH_2D(G_outputFromGpu,NI,NL,ni,nl))
{
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
DATA_TYPE *D_gpu;
DATA_TYPE *E_gpu;
DATA_TYPE *F_gpu;
DATA_TYPE *G_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ);
cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NJ * NM);
cudaMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NM * NL);
cudaMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NJ);
cudaMalloc((void **)&F_gpu, sizeof(DATA_TYPE) * NJ * NL);
cudaMalloc((void **)&G_gpu, sizeof(DATA_TYPE) * NI * NL);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, cudaMemcpyHostToDevice);
cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice);
cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NJ * NM, cudaMemcpyHostToDevice);
cudaMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NM * NL, cudaMemcpyHostToDevice);
cudaMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
cudaMemcpy(F_gpu, F, sizeof(DATA_TYPE) * NJ * NL, cudaMemcpyHostToDevice);
cudaMemcpy(G_gpu, G, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
/* Start timer. */
polybench_start_instruments;
mm3_kernel1<<<grid1,block>>>(ni, nj, nk, nl, nm, A_gpu, B_gpu, E_gpu);
cudaThreadSynchronize();
mm3_kernel2<<<grid2,block>>>(ni, nj, nk, nl, nm, C_gpu, D_gpu, F_gpu);
cudaThreadSynchronize();
mm3_kernel3<<<grid3,block>>>(ni, nj, nk, nl, nm, E_gpu, F_gpu, G_gpu);
cudaThreadSynchronize();
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
cudaMemcpy(G_outputFromGpu, G_gpu, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(C_gpu);
cudaFree(D_gpu);
cudaFree(E_gpu);
cudaFree(F_gpu);
cudaFree(G_gpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
int main(int argc, char** argv)
{
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
POLYBENCH_2D_ARRAY_DECL(G_outputFromGpu, DATA_TYPE, NI, NL, ni, nl);
init_array(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D));
GPU_argv_init();
mm3Cuda(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(G), POLYBENCH_ARRAY(G_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
mm3_cpu(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(G));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(ni, nl, POLYBENCH_ARRAY(G), POLYBENCH_ARRAY(G_outputFromGpu));
#else //prevent dead code elimination
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G_outputFromGpu)));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(G);
POLYBENCH_FREE_ARRAY(G_outputFromGpu);
return 0;
}
#include <polybench.c>
|
d925b8603b5c332c7e67fda14dd54057e10744d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_transform.h"
__device__ double op(double d1,double d2,double *params) {
if(d1 < d2) return 1;
else return 0;
}
__device__ double op(double d1,double *params) {
return d1;
}
extern "C"
__global__ void lt_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *params,double *result,int incz) {
transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result,incz);
} | d925b8603b5c332c7e67fda14dd54057e10744d2.cu | #include "pairwise_transform.h"
__device__ double op(double d1,double d2,double *params) {
if(d1 < d2) return 1;
else return 0;
}
__device__ double op(double d1,double *params) {
return d1;
}
extern "C"
__global__ void lt_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *params,double *result,int incz) {
transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result,incz);
} |
7945e0cc2d03c8109eaef61cd79c688748771338.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Graph/Device/cudaGraphBFS.cuh"
#include "Base/Host/fUtil.hpp"
#include "Base/Host/numeric.hpp"
#include "Base/Device/Util/cuda_util.cuh"
using namespace xlib;
namespace graph {
__constant__ _node_t* devF1 = 0;
__constant__ _node_t* devF2 = 0;
__constant__ _dist_t* devDistance = 0;
__device__ int devF2_size[4];
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
const dist_t cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>::INF_DIST =
std::numeric_limits<dist_t>::max();
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>
::cudaGraphBFS(GraphSTD<node_t, edge_t, dist_t>& _graph,
bool _inverse_graph,
unsigned _degree_options)
: cudaGraph<node_t, edge_t>
(_graph, _inverse_graph, _degree_options) {
hipMalloc(&cuDistance, static_cast<std::size_t>(V) * sizeof(dist_t));
hipMemcpyToSymbol(devDistance, &cuDistance, sizeof(dist_t*));
__CUDA_ERROR("Graph Frontier Allocation");
}
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
void cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>::AllocateFrontiers() {
std::size_t free, total;
hipMemGetInfo(&free, &total);
std::size_t frontier_size = (free / 2u) - 4 * (1024 * 1024);
hipMalloc(&cuF1, frontier_size);
hipMalloc(&cuF2, frontier_size);
hipMemcpyToSymbol(devF1, &cuF1, sizeof(node_t*));
hipMemcpyToSymbol(devF2, &cuF2, sizeof(node_t*));
max_frontier_nodes = frontier_size / sizeof(node_t);
__CUDA_ERROR("Graph Frontier Allocation");
if (max_frontier_nodes < V)
__ERROR("Device Memory not sufficient");
}
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>::~cudaGraphBFS() {
hipFree(cuF1);
hipFree(cuF2);
hipFree(cuDistance);
__CUDA_ERROR("Graph Free");
}
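// reset: copies the source vertices into frontier cuF1, fills all distances with
// INF_DIST (scattering distance 0 to the sources when update_distance is set),
// and zeroes the device-side devF2_size counters before a new traversal.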
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
void cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>
::reset(int* Sources, int n_of_sources, bool update_distance) {
hipMemcpy(cuF1, Sources, n_of_sources * sizeof(node_t),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( xlib::fill), dim3(Div(V, 128)), dim3(128), 0, 0, cuDistance, V, INF_DIST);
if (update_distance) {
hipLaunchKernelGGL(( xlib::scatter) , dim3(Div(n_of_sources, 128)), dim3(128), 0, 0,
cuF1, n_of_sources, cuDistance, dist_t(0));
}
int SizeArray[4] = {};
hipMemcpyToSymbol(devF2_size, SizeArray, sizeof(SizeArray));
__CUDA_ERROR("Graph Reset");
}
template class cudaGraphBFS<>;
} //@graph
| 7945e0cc2d03c8109eaef61cd79c688748771338.cu | #include "Graph/Device/cudaGraphBFS.cuh"
#include "Base/Host/fUtil.hpp"
#include "Base/Host/numeric.hpp"
#include "Base/Device/Util/cuda_util.cuh"
using namespace xlib;
namespace graph {
__constant__ _node_t* devF1 = 0;
__constant__ _node_t* devF2 = 0;
__constant__ _dist_t* devDistance = 0;
__device__ int devF2_size[4];
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
const dist_t cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>::INF_DIST =
std::numeric_limits<dist_t>::max();
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>
::cudaGraphBFS(GraphSTD<node_t, edge_t, dist_t>& _graph,
bool _inverse_graph,
unsigned _degree_options)
: cudaGraph<node_t, edge_t>
(_graph, _inverse_graph, _degree_options) {
cudaMalloc(&cuDistance, static_cast<std::size_t>(V) * sizeof(dist_t));
cudaMemcpyToSymbol(devDistance, &cuDistance, sizeof(dist_t*));
__CUDA_ERROR("Graph Frontier Allocation");
}
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
void cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>::AllocateFrontiers() {
std::size_t free, total;
cudaMemGetInfo(&free, &total);
std::size_t frontier_size = (free / 2u) - 4 * (1024 * 1024);
cudaMalloc(&cuF1, frontier_size);
cudaMalloc(&cuF2, frontier_size);
cudaMemcpyToSymbol(devF1, &cuF1, sizeof(node_t*));
cudaMemcpyToSymbol(devF2, &cuF2, sizeof(node_t*));
max_frontier_nodes = frontier_size / sizeof(node_t);
__CUDA_ERROR("Graph Frontier Allocation");
if (max_frontier_nodes < V)
__ERROR("Device Memory not sufficient");
}
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>::~cudaGraphBFS() {
cudaFree(cuF1);
cudaFree(cuF2);
cudaFree(cuDistance);
__CUDA_ERROR("Graph Free");
}
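// reset: copies the source vertices into frontier cuF1, fills all distances with
// INF_DIST (scattering distance 0 to the sources when update_distance is set),
// and zeroes the device-side devF2_size counters before a new traversal.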
template<typename node_t, typename edge_t, typename cunode_t, typename dist_t>
void cudaGraphBFS<node_t, edge_t, cunode_t, dist_t>
::reset(int* Sources, int n_of_sources, bool update_distance) {
cudaMemcpy(cuF1, Sources, n_of_sources * sizeof(node_t),
cudaMemcpyHostToDevice);
xlib::fill<<<Div(V, 128), 128>>>(cuDistance, V, INF_DIST);
if (update_distance) {
xlib::scatter <<<Div(n_of_sources, 128), 128>>>
(cuF1, n_of_sources, cuDistance, dist_t(0));
}
int SizeArray[4] = {};
cudaMemcpyToSymbol(devF2_size, SizeArray, sizeof(SizeArray));
__CUDA_ERROR("Graph Reset");
}
template class cudaGraphBFS<>;
} //@graph
|
55426937b84e83f044b5d8fe324ed66982297fb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions mixed zc -> ds
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_z
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to clat2z and zlaset.
*/
__global__
void clag2z_kernel(
int m, int n,
const magmaFloatComplex *SA, int ldsa,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = cuComplexFloatToDouble( SA[j*ldsa] );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
A[j*lda] = cuComplexFloatToDouble( SA[j*ldsa] );
}
}
}
}
/**
Purpose
-------
CLAG2Z_STREAM converts a single-complex matrix, SA,
to a double-complex matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA REAL array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A DOUBLE PRECISION array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clag2z_q(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr SA, magma_int_t ldsa,
magmaDoubleComplex_ptr A, magma_int_t lda,
magma_int_t *info,
magma_queue_t queue)
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X );
dim3 grid( (m+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y );
hipLaunchKernelGGL(( clag2z_kernel), dim3(grid), dim3(threads), 0, queue , m, n, SA, ldsa, A, lda );
}
/**
@see magmablas_clag2z_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clag2z(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr SA, magma_int_t ldsa,
magmaDoubleComplex_ptr A, magma_int_t lda,
magma_int_t *info)
{
magmablas_clag2z_q( m, n, SA, ldsa, A, lda, info, magma_stream );
}
| 55426937b84e83f044b5d8fe324ed66982297fb1.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions mixed zc -> ds
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_z
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to clat2z and zlaset.
*/
__global__
void clag2z_kernel(
int m, int n,
const magmaFloatComplex *SA, int ldsa,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = cuComplexFloatToDouble( SA[j*ldsa] );
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
A[j*lda] = cuComplexFloatToDouble( SA[j*ldsa] );
}
}
}
}
/**
Purpose
-------
CLAG2Z_STREAM converts a single-complex matrix, SA,
to a double-complex matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA REAL array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A DOUBLE PRECISION array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clag2z_q(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr SA, magma_int_t ldsa,
magmaDoubleComplex_ptr A, magma_int_t lda,
magma_int_t *info,
magma_queue_t queue)
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X );
dim3 grid( (m+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y );
clag2z_kernel<<< grid, threads, 0, queue >>> ( m, n, SA, ldsa, A, lda );
}
/**
@see magmablas_clag2z_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_clag2z(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr SA, magma_int_t ldsa,
magmaDoubleComplex_ptr A, magma_int_t lda,
magma_int_t *info)
{
magmablas_clag2z_q( m, n, SA, ldsa, A, lda, info, magma_stream );
}
|
7f8419935fc7560b5bf1dfcfd9d2ea533e40676a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <stdint.h>
#define MASTER 0
void Usage(char* prog_name) {
fprintf(stderr, "usage: %s <thread_count> <n>\n", prog_name);
fprintf(stderr, " n is the number of terms and should be >= 1\n");
exit(1);
}
__host__
double sequential(long long n) {
long long i;
double factor = 1;
double sum = 0.0;
for (i = 0; i < n; i++) {
factor = (i % 2 == 0) ? 1.0 : -1.0;
sum += factor/(2*i+1);
}
sum = 4.0*sum;
printf("With n = %lld terms\n", n);
printf(" Our estimate of pi = %.14f\n", sum);
return sum;
}
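// Each block accumulates its threads' Leibniz-series terms in shared memory and
// combines them with a tree reduction (assumes blockDim.x is a power of two);
// thread 0 writes the block's partial sum to g_odata[blockIdx.x], which the host
// later sums and multiplies by 4.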
__global__ void parallel(long long n, double* g_odata)
{
extern __shared__ double s_data[];
double factor;
unsigned int tid = threadIdx.x;
uint64_t i = (uint64_t)blockIdx.x*blockDim.x + threadIdx.x;
if(i < n){
factor = (i % 2 == 0) ? 1.0 : -1.0;
s_data[tid] = factor/(2*i+1);
}
else{
s_data[tid] = 0;
}
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if (tid < s)
{
s_data[tid] = s_data[tid] + s_data[tid + s];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = s_data[0];
}
int main(int argc, char* argv[]){
long long n;
double* cudaMem;
double* almost;
hipEvent_t start = hipEvent_t(), stop = hipEvent_t();
n = 100000;
hipEventCreate(&start);
hipEventCreate(&stop);
for(int i = 0; i < 5; i++){
double res_seq;
double res_par = 0;
n*=10;
printf("\nSEQUENTIAL:\n");
hipEventRecord(start, 0);
res_seq = sequential(n);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsed = 0.;
hipEventElapsedTime(&elapsed, start, stop);
printf("Sequential Time: %f\n", elapsed);
printf("\nPARALLEL:\n");
hipMalloc(&cudaMem, ceil(n/1024.0)*sizeof(double));
hipEventRecord(start, 0);
hipLaunchKernelGGL(( parallel), dim3(ceil(n/1024.0)),dim3(1024),1024*sizeof(double), 0, n, cudaMem);
almost = (double*)calloc(ceil(n/1024.0), sizeof(double));
hipMemcpy(almost, cudaMem, ceil(n/1024.0)*sizeof(double), hipMemcpyDeviceToHost);
hipFree(cudaMem);
for(int j = 0; j < ceil(n/1024.0); j++)
{
res_par += almost[j];
}
free(almost);
res_par *= 4;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
elapsed = 0.;
hipEventElapsedTime(&elapsed, start, stop);
printf(" Our estimate of pi = %.14f\n", res_par);
printf("Parallel Time: %f\n", elapsed);
if(abs(res_par - res_seq) <= 0.01)
{
printf("\nTEST PASSED\n");
}
else
{
printf("\nTEST FAILED\n");
}
}
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 7f8419935fc7560b5bf1dfcfd9d2ea533e40676a.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <stdint.h>
#define MASTER 0
void Usage(char* prog_name) {
fprintf(stderr, "usage: %s <thread_count> <n>\n", prog_name);
fprintf(stderr, " n is the number of terms and should be >= 1\n");
exit(1);
}
__host__
double sequential(long long n) {
long long i;
double factor = 1;
double sum = 0.0;
for (i = 0; i < n; i++) {
factor = (i % 2 == 0) ? 1.0 : -1.0;
sum += factor/(2*i+1);
}
sum = 4.0*sum;
printf("With n = %lld terms\n", n);
printf(" Our estimate of pi = %.14f\n", sum);
return sum;
}
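// Each block accumulates its threads' Leibniz-series terms in shared memory and
// combines them with a tree reduction (assumes blockDim.x is a power of two);
// thread 0 writes the block's partial sum to g_odata[blockIdx.x], which the host
// later sums and multiplies by 4.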
__global__ void parallel(long long n, double* g_odata)
{
extern __shared__ double s_data[];
double factor;
unsigned int tid = threadIdx.x;
uint64_t i = (uint64_t)blockIdx.x*blockDim.x + threadIdx.x;
if(i < n){
factor = (i % 2 == 0) ? 1.0 : -1.0;
s_data[tid] = factor/(2*i+1);
}
else{
s_data[tid] = 0;
}
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if (tid < s)
{
s_data[tid] = s_data[tid] + s_data[tid + s];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = s_data[0];
}
int main(int argc, char* argv[]){
long long n;
double* cudaMem;
double* almost;
cudaEvent_t start = cudaEvent_t(), stop = cudaEvent_t();
n = 100000;
cudaEventCreate(&start);
cudaEventCreate(&stop);
for(int i = 0; i < 5; i++){
double res_seq;
double res_par = 0;
n*=10;
printf("\nSEQUENTIAL:\n");
cudaEventRecord(start, 0);
res_seq = sequential(n);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsed = 0.;
cudaEventElapsedTime(&elapsed, start, stop);
printf("Sequential Time: %f\n", elapsed);
printf("\nPARALLEL:\n");
cudaMalloc(&cudaMem, ceil(n/1024.0)*sizeof(double));
cudaEventRecord(start, 0);
parallel<<<ceil(n/1024.0),1024,1024*sizeof(double)>>>(n, cudaMem);
almost = (double*)calloc(ceil(n/1024.0), sizeof(double));
cudaMemcpy(almost, cudaMem, ceil(n/1024.0)*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(cudaMem);
for(int j = 0; j < ceil(n/1024.0); j++)
{
res_par += almost[j];
}
free(almost);
res_par *= 4;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
elapsed = 0.;
cudaEventElapsedTime(&elapsed, start, stop);
printf(" Our estimate of pi = %.14f\n", res_par);
printf("Parallel Time: %f\n", elapsed);
if(abs(res_par - res_seq) <= 0.01)
{
printf("\nTEST PASSED\n");
}
else
{
printf("\nTEST FAILED\n");
}
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
3f118b3fd16253e83c9811af7401cdb420719d27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 2250
#define T 512
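// Element-wise combine, one thread per element: c[i] = a[i] + b[i] for even i,
// c[i] = a[i] - b[i] for odd i; threads with i >= N do nothing.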
__global__ void vecReverse(int *a, int *b, int *c){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N){
if (i % 2 == 0){
c[i] = a[i] + b[i];
}else{
c[i] = a[i] - b[i];
}
}
}
int main(int argc, char *argv[]){
int size = N * sizeof(int);
int a[N], b[N], c[N], *devA, *devB, *devC;
int blocks;
//Compute the blocks in case that N % T != 0
if (N % T != 0){
blocks =(N+T-1) / T;
}else{
blocks = N/T;
}
srand(1234);
for (int i = 0; i < N; i++){
a[i] = rand() % 1000;
b[i] = rand() % 1000;
}
hipMalloc((void**)&devA, size);
hipMalloc((void**)&devB, size);
hipMalloc((void**)&devC, size);
hipMemcpy(devA, a, size, hipMemcpyHostToDevice);
hipMemcpy(devB, b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vecReverse), dim3(blocks),dim3(T), 0, 0, devA,devB,devC);
hipMemcpy(c, devC, size, hipMemcpyDeviceToHost);
hipFree(devA);
hipFree(devB);
hipFree(devC);
for (int i = 0; i < N; i++){
printf("%d ",c[i]);
}
printf("\n");
} | 3f118b3fd16253e83c9811af7401cdb420719d27.cu | #include <stdio.h>
#define N 2250
#define T 512
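// Element-wise combine, one thread per element: c[i] = a[i] + b[i] for even i,
// c[i] = a[i] - b[i] for odd i; threads with i >= N do nothing.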
__global__ void vecReverse(int *a, int *b, int *c){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N){
if (i % 2 == 0){
c[i] = a[i] + b[i];
}else{
c[i] = a[i] - b[i];
}
}
}
int main(int argc, char *argv[]){
int size = N * sizeof(int);
int a[N], b[N], c[N], *devA, *devB, *devC;
int blocks;
//Compute the blocks in case that N % T != 0
if (N % T != 0){
blocks =(N+T-1) / T;
}else{
blocks = N/T;
}
srand(1234);
for (int i = 0; i < N; i++){
a[i] = rand() % 1000;
b[i] = rand() % 1000;
}
cudaMalloc((void**)&devA, size);
cudaMalloc((void**)&devB, size);
cudaMalloc((void**)&devC, size);
cudaMemcpy(devA, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(devB, b, size, cudaMemcpyHostToDevice);
vecReverse<<<blocks,T>>>(devA,devB,devC);
cudaMemcpy(c, devC, size, cudaMemcpyDeviceToHost);
cudaFree(devA);
cudaFree(devB);
cudaFree(devC);
for (int i = 0; i < N; i++){
printf("%d ",c[i]);
}
printf("\n");
} |
9892757f081e0c20818cbea0633483e6f8125103.hip | // !!! This is a file automatically generated by hipify!!!
///////////////////////////////////////
///////////////////////////////// SSSP6
/////////////////////// usando texturas
///////////////////////////////////////
/* CWJ includes */
#include <hip/hip_runtime.h>
#include "comun.cu"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#ifndef _SSSP6_Texture_AllOfAll
#define _SSSP6_Texture_AllOfAll
//////////////////////////////////////////
bool ejecutarIteracion_SSSP6_tex_allOfAll(
const unsigned int nVuelta,
const dim3 grid, const dim3 threads,
const unsigned int nv, const unsigned int na,
const unsigned int mem_size_V, const unsigned int mem_size_A,
const unsigned int mem_size_C, const unsigned int mem_size_F,
const unsigned int infinito,
bool* p_h, bool* f_h, unsigned int* c_h ,
bool* p_d, bool* f_d, unsigned int* c_d,
unsigned int* chi, unsigned int* cho, unsigned int* cdi, unsigned int* cdo)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
#ifdef DEBUG
printf("\n\n*******************\n");
printf("\nVUELTA %i\n",nVuelta);
mostrarUI(c_h, nv, "c_h");
mostrarB(f_h, nv, "f_h");
mostrarB(p_h, nv, "p_h");
printf("\nEJECUCION KERNEL 1\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
/* Updated timer code for CUDA 9 */
hipEvent_t timerStart, timerStop;
float time;
// start things
hipEventCreate(&timerStart);
hipEventCreate(&timerStop);
hipEventRecord(timerStart, 0);
//ejecutar último kernel
hipGetLastError(); // reset the runtime error variable to hipSuccess
// ACTUALIZANDO CAMINOS MINIMOS ESPECIALES: kernel1
hipLaunchKernelGGL(( kernel1_SSSP6_tex_all), dim3(grid),dim3(threads),threads.x*sizeof(unsigned int), 0, c_d);
// check if kernel execution generated and error
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
hipDeviceSynchronize();
// end things
hipEventRecord(timerStop, 0);
hipEventSynchronize(timerStop);
hipEventElapsedTime(&time, timerStart, timerStop);
hipEventDestroy(timerStart);
hipEventDestroy(timerStop);
//printf("%.6f", time);
printf("K1 = %f\n", time);
#ifdef DEBUG
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
mostrarUI(c_h, nv, "c_h");
printf("\nEJECUCION KERNEL 2\n");
#endif // DEBUG
//MINIMIZANDO LOS COSTES RECIEN ACTUALIZADOS
unsigned int min= infinito;
minimizar(nv, c_d, p_d, threads, infinito, chi, cho, cdi, cdo, min);
#ifdef DEBUG
printf("\n\nEl minimo es %i\n", min);
printf("\nEJECUCION KERNEL 3\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
// start things
hipEventCreate(&timerStart);
hipEventCreate(&timerStop);
hipEventRecord(timerStart, 0);
//ejecutar último kernel
hipGetLastError(); // reset the runtime error variable to hipSuccess
//ACTUALIZANDO LA FRONTERA: Kernel3
hipLaunchKernelGGL(( kernel3_tex), dim3(grid),dim3(threads), 0, 0, p_d, f_d, min);
// check if kernel execution generated and error
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
hipDeviceSynchronize();
// end things
hipEventRecord(timerStop, 0);
hipEventSynchronize(timerStop);
hipEventElapsedTime(&time, timerStart, timerStop);
hipEventDestroy(timerStart);
hipEventDestroy(timerStop);
//printf("%.6f", time);
printf("K3 = %f\n", time);
#ifdef DEBUG
copiarD2H((void*)p_h, (void*)p_d, mem_size_F);
mostrarB(p_h, nv, "p_h");
copiarD2H((void*)f_h, (void*)f_d, mem_size_F);
mostrarB(f_h, nv, "f_h");
#endif // DEBUG
return (min==infinito);
}
//////////////////////////////////
void testGraph_SSSP6_tex_allOfAll(
const unsigned int nv, const unsigned int mem_size_V,
const unsigned int na, const unsigned int mem_size_A,
const unsigned int infinito,
const unsigned int* v_h, const unsigned int* a_h, const unsigned int* w_h,
const unsigned int* reference)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
unsigned int* v_d; //array de vértices device
unsigned int* a_d; //array de aristas device
unsigned int* w_d; //array de pesos device
//copiar grafo de host a device
inicializar_Grafo_Device(v_h, mem_size_V, v_d,
a_h, mem_size_A, a_d,
w_h, w_d);
//enlazar las texturas
hipBindTexture(0, textura_v, v_d, mem_size_V);
hipBindTexture(0, textura_a, a_d, mem_size_A);
hipBindTexture(0, textura_w, w_d, mem_size_A);
unsigned int* c_h; //solución en el host
unsigned int* c_d; //solución en el device
unsigned int mem_size_C= mem_size_V-sizeof(unsigned int); //Descontar el tapon -4
inicializar_Sol(c_h, c_d, nv, mem_size_C, infinito);
bool* f_h; //frontera en el host
bool* f_d; //frontera en el device
unsigned int mem_size_F= sizeof(bool) * nv;
inicializar_Frontera(f_h, f_d, nv, mem_size_F);
bool* p_h; //pendientes por procesar
bool* p_d; //pendientes por procesar
inicializar_Pendientes(p_h, p_d, nv, mem_size_F);
//enlazar las texturas del algoritmo
hipBindTexture(0, textura_c, c_d, mem_size_C);
hipBindTexture(0, textura_p, p_d, mem_size_F);
hipBindTexture(0, textura_f, f_d, mem_size_F);
#ifdef DEBUG
//DEPURACION
printf("\nnv= %i\n", nv);
printf("na= %i\n", na);
printf("mem_size_V= %i\n", mem_size_V);
printf("mem_size_A= %i\n", mem_size_A);
printf("mem_size_F= %i\n\n", mem_size_F);
#endif // DEBUG
// setup execution parameters
unsigned int num_threadsInBlock= NUM_THREADS_IN_BLOCK;
//unsigned int num_blocksInGrid= nv/num_threadsInBlock;
unsigned int num_blocksInGrid = (nv + (num_threadsInBlock-1)) / num_threadsInBlock;
dim3 grid( num_blocksInGrid, 1, 1);
dim3 threads( num_threadsInBlock, 1, 1);
//RESERVAR ESPACIO PARA LA MINIMIZACION
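// work buffers for the staged minimisation in minimizar(): chi/cdi receive first-stage
// partial minima, cho/cdo the second stage (sizing assumes each stage reduces
// 2*num_threadsInBlock values per block).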
unsigned int nvi= nv/(2*num_threadsInBlock);
unsigned int nvo= nvi/(2*num_threadsInBlock);
unsigned int* cdi;
unsigned int* cdo;
hipMalloc((void**) &cdi, nvi*sizeof(unsigned int));
hipMalloc((void**) &cdo, nvo*sizeof(unsigned int));
unsigned int* chi = (unsigned int*) malloc(nvi*sizeof(unsigned int));
unsigned int* cho = (unsigned int*) malloc(nvo*sizeof(unsigned int));
/* Updated timer code for CUDA 9 */
hipEvent_t timerStart, timerStop;
float time;
// start things
hipEventCreate(&timerStart);
hipEventCreate(&timerStop);
hipEventRecord(timerStart, 0);
//EJECUTAR VUELTAS
bool ultima= false;
unsigned int i= 0;
while(!ultima){
i++;
ultima= ejecutarIteracion_SSSP6_tex_allOfAll( i,
grid, threads,
nv, na,
mem_size_V, mem_size_A, mem_size_C, mem_size_F,
infinito,
p_h, f_h, c_h,
p_d, f_d, c_d,
chi, cho, cdi, cdo);
}//while
// end things
hipEventRecord(timerStop, 0);
hipEventSynchronize(timerStop);
hipEventElapsedTime(&time, timerStart, timerStop);
hipEventDestroy(timerStart);
hipEventDestroy(timerStop);
printf("%.6f", time);
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
//desenlazar las texturas
hipUnbindTexture(textura_v);
hipUnbindTexture(textura_a);
hipUnbindTexture(textura_w);
// cleanup memory
hipFree(v_d);
hipFree(a_d);
hipFree(w_d);
free(f_h);
free(p_h);
//desenlazar las texturas
hipUnbindTexture(textura_c);
//hipUnbindTexture(textura_p);
//hipUnbindTexture(textura_f);
hipFree(c_d);
hipFree(f_d);
hipFree(p_d);
free(chi);
free(cho);
hipFree(cdi);
hipFree(cdo);
// check result
//CUTBoolean res = cutComparei( (int*)reference, (int*)c_h, nv);
//printf( "%s\t", (1 == res) ? "OK" : "FAILED");
//mostrarUI(c_h, nv, "c_h");
//mostrarUI(reference, nv, "reference");
// cleanup memory
free(c_h);
}
#endif //#ifndef _SSSP6_Texture_AllOfAll
| 9892757f081e0c20818cbea0633483e6f8125103.cu | ///////////////////////////////////////
///////////////////////////////// SSSP6
/////////////////////// usando texturas
///////////////////////////////////////
/* CWJ includes */
#include <cuda.h>
#include "comun.cu"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#ifndef _SSSP6_Texture_AllOfAll
#define _SSSP6_Texture_AllOfAll
//////////////////////////////////////////
bool ejecutarIteracion_SSSP6_tex_allOfAll(
const unsigned int nVuelta,
const dim3 grid, const dim3 threads,
const unsigned int nv, const unsigned int na,
const unsigned int mem_size_V, const unsigned int mem_size_A,
const unsigned int mem_size_C, const unsigned int mem_size_F,
const unsigned int infinito,
bool* p_h, bool* f_h, unsigned int* c_h ,
bool* p_d, bool* f_d, unsigned int* c_d,
unsigned int* chi, unsigned int* cho, unsigned int* cdi, unsigned int* cdo)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
#ifdef DEBUG
printf("\n\n*******************\n");
printf("\nVUELTA %i\n",nVuelta);
mostrarUI(c_h, nv, "c_h");
mostrarB(f_h, nv, "f_h");
mostrarB(p_h, nv, "p_h");
printf("\nEJECUCION KERNEL 1\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
/* Updated timer code for CUDA 9 */
cudaEvent_t timerStart, timerStop;
float time;
// start things
cudaEventCreate(&timerStart);
cudaEventCreate(&timerStop);
cudaEventRecord(timerStart, 0);
//ejecutar último kernel
cudaGetLastError(); // reset the runtime error variable to cudaSuccess
// ACTUALIZANDO CAMINOS MINIMOS ESPECIALES: kernel1
kernel1_SSSP6_tex_all<<<grid,threads,threads.x*sizeof(unsigned int)>>>( c_d);
// check if kernel execution generated and error
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
cudaThreadSynchronize();
// end things
cudaEventRecord(timerStop, 0);
cudaEventSynchronize(timerStop);
cudaEventElapsedTime(&time, timerStart, timerStop);
cudaEventDestroy(timerStart);
cudaEventDestroy(timerStop);
//printf("%.6f", time);
printf("K1 = %f\n", time);
#ifdef DEBUG
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
mostrarUI(c_h, nv, "c_h");
printf("\nEJECUCION KERNEL 2\n");
#endif // DEBUG
//MINIMIZANDO LOS COSTES RECIEN ACTUALIZADOS
unsigned int min= infinito;
minimizar(nv, c_d, p_d, threads, infinito, chi, cho, cdi, cdo, min);
#ifdef DEBUG
printf("\n\nEl minimo es %i\n", min);
printf("\nEJECUCION KERNEL 3\n");
printf("num_threadsInBlock= %i\n", threads.x);
printf("num_blocksInGrid= %i\n", grid.x);
#endif // DEBUG
// start things
cudaEventCreate(&timerStart);
cudaEventCreate(&timerStop);
cudaEventRecord(timerStart, 0);
//ejecutar último kernel
cudaGetLastError(); // reset the runtime error variable to cudaSuccess
//ACTUALIZANDO LA FRONTERA: Kernel3
kernel3_tex<<<grid,threads>>>( p_d, f_d, min);
// check if kernel execution generated and error
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
cudaThreadSynchronize();
// end things
cudaEventRecord(timerStop, 0);
cudaEventSynchronize(timerStop);
cudaEventElapsedTime(&time, timerStart, timerStop);
cudaEventDestroy(timerStart);
cudaEventDestroy(timerStop);
//printf("%.6f", time);
printf("K3 = %f\n", time);
#ifdef DEBUG
copiarD2H((void*)p_h, (void*)p_d, mem_size_F);
mostrarB(p_h, nv, "p_h");
copiarD2H((void*)f_h, (void*)f_d, mem_size_F);
mostrarB(f_h, nv, "f_h");
#endif // DEBUG
return (min==infinito);
}
//////////////////////////////////
void testGraph_SSSP6_tex_allOfAll(
const unsigned int nv, const unsigned int mem_size_V,
const unsigned int na, const unsigned int mem_size_A,
const unsigned int infinito,
const unsigned int* v_h, const unsigned int* a_h, const unsigned int* w_h,
const unsigned int* reference)
{
//RECUERDA: mem_size_V= (nv+1)*sizeof(unsigned int)
unsigned int* v_d; //array de vértices device
unsigned int* a_d; //array de aristas device
unsigned int* w_d; //array de pesos device
//copiar grafo de host a device
inicializar_Grafo_Device(v_h, mem_size_V, v_d,
a_h, mem_size_A, a_d,
w_h, w_d);
//enlazar las texturas
cudaBindTexture(0, textura_v, v_d, mem_size_V);
cudaBindTexture(0, textura_a, a_d, mem_size_A);
cudaBindTexture(0, textura_w, w_d, mem_size_A);
unsigned int* c_h; //solución en el host
unsigned int* c_d; //solución en el device
unsigned int mem_size_C= mem_size_V-sizeof(unsigned int); //Descontar el tapon -4
inicializar_Sol(c_h, c_d, nv, mem_size_C, infinito);
bool* f_h; //frontera en el host
bool* f_d; //frontera en el device
unsigned int mem_size_F= sizeof(bool) * nv;
inicializar_Frontera(f_h, f_d, nv, mem_size_F);
bool* p_h; //pendientes por procesar
bool* p_d; //pendientes por procesar
inicializar_Pendientes(p_h, p_d, nv, mem_size_F);
//enlazar las texturas del algoritmo
cudaBindTexture(0, textura_c, c_d, mem_size_C);
cudaBindTexture(0, textura_p, p_d, mem_size_F);
cudaBindTexture(0, textura_f, f_d, mem_size_F);
#ifdef DEBUG
//DEPURACION
printf("\nnv= %i\n", nv);
printf("na= %i\n", na);
printf("mem_size_V= %i\n", mem_size_V);
printf("mem_size_A= %i\n", mem_size_A);
printf("mem_size_F= %i\n\n", mem_size_F);
#endif // DEBUG
// setup execution parameters
unsigned int num_threadsInBlock= NUM_THREADS_IN_BLOCK;
//unsigned int num_blocksInGrid= nv/num_threadsInBlock;
unsigned int num_blocksInGrid = (nv + (num_threadsInBlock-1)) / num_threadsInBlock;
dim3 grid( num_blocksInGrid, 1, 1);
dim3 threads( num_threadsInBlock, 1, 1);
//RESERVAR ESPACIO PARA LA MINIMIZACION
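// work buffers for the staged minimisation in minimizar(): chi/cdi receive first-stage
// partial minima, cho/cdo the second stage (sizing assumes each stage reduces
// 2*num_threadsInBlock values per block).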
unsigned int nvi= nv/(2*num_threadsInBlock);
unsigned int nvo= nvi/(2*num_threadsInBlock);
unsigned int* cdi;
unsigned int* cdo;
cudaMalloc((void**) &cdi, nvi*sizeof(unsigned int));
cudaMalloc((void**) &cdo, nvo*sizeof(unsigned int));
unsigned int* chi = (unsigned int*) malloc(nvi*sizeof(unsigned int));
unsigned int* cho = (unsigned int*) malloc(nvo*sizeof(unsigned int));
/* Updated timer code for CUDA 9 */
cudaEvent_t timerStart, timerStop;
float time;
// start things
cudaEventCreate(&timerStart);
cudaEventCreate(&timerStop);
cudaEventRecord(timerStart, 0);
//EJECUTAR VUELTAS
bool ultima= false;
unsigned int i= 0;
while(!ultima){
i++;
ultima= ejecutarIteracion_SSSP6_tex_allOfAll( i,
grid, threads,
nv, na,
mem_size_V, mem_size_A, mem_size_C, mem_size_F,
infinito,
p_h, f_h, c_h,
p_d, f_d, c_d,
chi, cho, cdi, cdo);
}//while
// end things
cudaEventRecord(timerStop, 0);
cudaEventSynchronize(timerStop);
cudaEventElapsedTime(&time, timerStart, timerStop);
cudaEventDestroy(timerStart);
cudaEventDestroy(timerStop);
printf("%.6f", time);
copiarD2H((void*)c_h, (void*)c_d, mem_size_C);
//desenlazar las texturas
cudaUnbindTexture(textura_v);
cudaUnbindTexture(textura_a);
cudaUnbindTexture(textura_w);
// cleanup memory
cudaFree(v_d);
cudaFree(a_d);
cudaFree(w_d);
free(f_h);
free(p_h);
//desenlazar las texturas
cudaUnbindTexture(textura_c);
//cudaUnbindTexture(textura_p);
//cudaUnbindTexture(textura_f);
cudaFree(c_d);
cudaFree(f_d);
cudaFree(p_d);
free(chi);
free(cho);
cudaFree(cdi);
cudaFree(cdo);
// check result
//CUTBoolean res = cutComparei( (int*)reference, (int*)c_h, nv);
//printf( "%s\t", (1 == res) ? "OK" : "FAILED");
//mostrarUI(c_h, nv, "c_h");
//mostrarUI(reference, nv, "reference");
// cleanup memory
free(c_h);
}
#endif //#ifndef _SSSP6_Texture_AllOfAll
|
f9ba889a8d9beb57fb2c618a968aa419a26df331.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* This example demonstrates how to use the Cuda OpenGL bindings with the
* runtime API.
* Device code.
*/
#ifndef _SIMPLEGL_KERNEL_H_
#define _SIMPLEGL_KERNEL_H_
#include "cutil_math.h"
#include "math_constants.h"
typedef struct Vertex16Color_s{
float3 pos;
uchar4 color;
}Vertex16Color_t;
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(Vertex16Color_t* vertex, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
float3 pos = make_float3(u,w,v);
float3 poscolor = (pos*0.5f + 0.5f)*255.0f;
// write output vertex
Vertex16Color_t vtx;
vtx.pos = pos;
vtx.color = make_uchar4(poscolor.x,poscolor.y,poscolor.z,0);
vertex[y*width+x] = vtx;
//vertex[y*width+x] = make_float4(u, w, v, 1.0f);
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(struct Vertex16Color_s* vertex, unsigned int mesh_width, unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, vertex, mesh_width, mesh_height, time);
}
#endif // #ifndef _SIMPLEGL_KERNEL_H_
| f9ba889a8d9beb57fb2c618a968aa419a26df331.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* This example demonstrates how to use the Cuda OpenGL bindings with the
* runtime API.
* Device code.
*/
#ifndef _SIMPLEGL_KERNEL_H_
#define _SIMPLEGL_KERNEL_H_
#include "cutil_math.h"
#include "math_constants.h"
typedef struct Vertex16Color_s{
float3 pos;
uchar4 color;
}Vertex16Color_t;
///////////////////////////////////////////////////////////////////////////////
//! Simple kernel to modify vertex positions in sine wave pattern
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
__global__ void kernel(Vertex16Color_t* vertex, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
float3 pos = make_float3(u,w,v);
float3 poscolor = (pos*0.5f + 0.5f)*255.0f;
// write output vertex
Vertex16Color_t vtx;
vtx.pos = pos;
vtx.color = make_uchar4(poscolor.x,poscolor.y,poscolor.z,0);
vertex[y*width+x] = vtx;
//vertex[y*width+x] = make_float4(u, w, v, 1.0f);
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(struct Vertex16Color_s* vertex, unsigned int mesh_width, unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
kernel<<< grid, block>>>(vertex, mesh_width, mesh_height, time);
}
#endif // #ifndef _SIMPLEGL_KERNEL_H_
|
19c1d4b42e3c6b2f973e2db6d0b71fb5704ba33d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <math.h>
using namespace std;
__global__
void p_vec_dist(int dim, float3 p, float3 *vec, float *res){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
res[i] = (p.x - vec[i].x) * (p.x - vec[i].x);
res[i] += (p.y - vec[i].y) * (p.y - vec[i].y);
res[i] += (p.z - vec[i].z) * (p.z - vec[i].z);
}
}
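// Note: res[i] is reassigned on every iteration of the inner j loop, so after the
// loop it holds only the squared distance from vec0[i] to the last point vec1[dim-1].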
__global__
void vec_vec_dist(int dim, float3 *vec0, float3 *vec1, float *res){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
//p_vec_dist<<<1, dim>>>(dim, vec0[i], vec1, res[i]);
for (int j = 0; j < dim; j++){
res[i] = (vec0[i].x - vec1[j].x) * (vec0[i].x - vec1[j].x);
res[i] += (vec0[i].y - vec1[j].y) * (vec0[i].y - vec1[j].y);
res[i] += (vec0[i].z - vec1[j].z) * (vec0[i].z - vec1[j].z);
}
}
}
__global__
void fill_float3(int dim, float3 val, float3 *dst)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
dst[i].x = val.x;
dst[i].y = val.y;
dst[i].z = val.z;
}
}
int main(void){
int dim = 1 << 10;
float3 *x, *y;
float *res;
hipMallocManaged(&x, dim * sizeof(float3));
hipMallocManaged(&y, dim * sizeof(float3));
hipMallocManaged(&res, dim * sizeof(float));
hipLaunchKernelGGL(( fill_float3), dim3(32), dim3(32), 0, 0, dim, make_float3(1.0, 2.0, 3.0), x);
hipLaunchKernelGGL(( fill_float3), dim3(32), dim3(32), 0, 0, dim, make_float3(4.0, 5.0, 6.0), y);
hipLaunchKernelGGL(( vec_vec_dist), dim3(32), dim3(32), 0, 0, dim, x, y, res);
float maxError = 0.0;
for (int i = 0; i < dim; i++)
for (int j = 0; j < dim; j++)
maxError = fmax(maxError, fabs(res[i] - 0.0f));
cout << "Max error: " << maxError << endl;
hipFree(x);
hipFree(y);
hipFree(res);
return 0;
} | 19c1d4b42e3c6b2f973e2db6d0b71fb5704ba33d.cu | #include <iostream>
#include <cstdlib>
#include <math.h>
using namespace std;
__global__
void p_vec_dist(int dim, float3 p, float3 *vec, float *res){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
res[i] = (p.x - vec[i].x) * (p.x - vec[i].x);
res[i] += (p.y - vec[i].y) * (p.y - vec[i].y);
res[i] += (p.z - vec[i].z) * (p.z - vec[i].z);
}
}
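// Note: res[i] is reassigned on every iteration of the inner j loop, so after the
// loop it holds only the squared distance from vec0[i] to the last point vec1[dim-1].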
__global__
void vec_vec_dist(int dim, float3 *vec0, float3 *vec1, float *res){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
//p_vec_dist<<<1, dim>>>(dim, vec0[i], vec1, res[i]);
for (int j = 0; j < dim; j++){
res[i] = (vec0[i].x - vec1[j].x) * (vec0[i].x - vec1[j].x);
res[i] += (vec0[i].y - vec1[j].y) * (vec0[i].y - vec1[j].y);
res[i] += (vec0[i].z - vec1[j].z) * (vec0[i].z - vec1[j].z);
}
}
}
__global__
void fill_float3(int dim, float3 val, float3 *dst)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < dim; i += stride){
dst[i].x = val.x;
dst[i].y = val.y;
dst[i].z = val.z;
}
}
int main(void){
int dim = 1 << 10;
float3 *x, *y;
float *res;
cudaMallocManaged(&x, dim * sizeof(float3));
cudaMallocManaged(&y, dim * sizeof(float3));
cudaMallocManaged(&res, dim * sizeof(float));
fill_float3<<<32, 32>>>(dim, make_float3(1.0, 2.0, 3.0), x);
fill_float3<<<32, 32>>>(dim, make_float3(4.0, 5.0, 6.0), y);
vec_vec_dist<<<32, 32>>>(dim, x, y, res);
float maxError = 0.0;
for (int i = 0; i < dim; i++)
for (int j = 0; j < dim; j++)
maxError = fmax(maxError, fabs(res[i] - 0.0f));
cout << "Max error: " << maxError << endl;
cudaFree(x);
cudaFree(y);
cudaFree(res);
return 0;
} |
b387ff63978c0b48e469da551c4f6e0a42ce3700.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by ivf_pq_compute_similarity_00_generate.py
*
* Make changes there and run in this directory:
*
* > python ivf_pq_compute_similarity_00_generate.py
*
*/
#include <raft/neighbors/detail/ivf_pq_compute_similarity-inl.cuh>
#include <raft/neighbors/detail/ivf_pq_fp_8bit.cuh>
#define instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( \
OutT, LutT, IvfSampleFilterT) \
template auto \
raft::neighbors::ivf_pq::detail::compute_similarity_select<OutT, LutT, IvfSampleFilterT>( \
const hipDeviceProp_t& dev_props, \
bool manage_local_topk, \
int locality_hint, \
double preferred_shmem_carveout, \
uint32_t pq_bits, \
uint32_t pq_dim, \
uint32_t precomp_data_count, \
uint32_t n_queries, \
uint32_t n_probes, \
uint32_t topk) \
->raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT>; \
\
template void \
raft::neighbors::ivf_pq::detail::compute_similarity_run<OutT, LutT, IvfSampleFilterT>( \
raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT> s, \
rmm::cuda_stream_view stream, \
uint32_t dim, \
uint32_t n_probes, \
uint32_t pq_dim, \
uint32_t n_queries, \
uint32_t queries_offset, \
raft::distance::DistanceType metric, \
raft::neighbors::ivf_pq::codebook_gen codebook_kind, \
uint32_t topk, \
uint32_t max_samples, \
const float* cluster_centers, \
const float* pq_centers, \
const uint8_t* const* pq_dataset, \
const uint32_t* cluster_labels, \
const uint32_t* _chunk_indices, \
const float* queries, \
const uint32_t* index_list, \
float* query_kths, \
IvfSampleFilterT sample_filter, \
LutT* lut_scores, \
OutT* _out_scores, \
uint32_t* _out_indices);
#define COMMA ,
instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select(
half, half, raft::neighbors::filtering::none_ivf_sample_filter);
#undef COMMA
#undef instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select
| b387ff63978c0b48e469da551c4f6e0a42ce3700.cu |
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by ivf_pq_compute_similarity_00_generate.py
*
* Make changes there and run in this directory:
*
* > python ivf_pq_compute_similarity_00_generate.py
*
*/
#include <raft/neighbors/detail/ivf_pq_compute_similarity-inl.cuh>
#include <raft/neighbors/detail/ivf_pq_fp_8bit.cuh>
#define instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( \
OutT, LutT, IvfSampleFilterT) \
template auto \
raft::neighbors::ivf_pq::detail::compute_similarity_select<OutT, LutT, IvfSampleFilterT>( \
const cudaDeviceProp& dev_props, \
bool manage_local_topk, \
int locality_hint, \
double preferred_shmem_carveout, \
uint32_t pq_bits, \
uint32_t pq_dim, \
uint32_t precomp_data_count, \
uint32_t n_queries, \
uint32_t n_probes, \
uint32_t topk) \
->raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT>; \
\
template void \
raft::neighbors::ivf_pq::detail::compute_similarity_run<OutT, LutT, IvfSampleFilterT>( \
raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT> s, \
rmm::cuda_stream_view stream, \
uint32_t dim, \
uint32_t n_probes, \
uint32_t pq_dim, \
uint32_t n_queries, \
uint32_t queries_offset, \
raft::distance::DistanceType metric, \
raft::neighbors::ivf_pq::codebook_gen codebook_kind, \
uint32_t topk, \
uint32_t max_samples, \
const float* cluster_centers, \
const float* pq_centers, \
const uint8_t* const* pq_dataset, \
const uint32_t* cluster_labels, \
const uint32_t* _chunk_indices, \
const float* queries, \
const uint32_t* index_list, \
float* query_kths, \
IvfSampleFilterT sample_filter, \
LutT* lut_scores, \
OutT* _out_scores, \
uint32_t* _out_indices);
#define COMMA ,
instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select(
half, half, raft::neighbors::filtering::none_ivf_sample_filter);
#undef COMMA
#undef instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select
|
dfdb8168bb7ab5a77044be9a9ca69b7b6575f8df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_MEAN_ABSOLUTE_ERROR_LAYER_INSTANTIATE
#include "lbann/layers/loss/mean_absolute_error.hpp"
namespace lbann {
namespace {
template <int block_size, typename TensorDataType>
__global__ void fp_kernel(int global_height,
int local_height, int local_width,
const TensorDataType* __restrict__ prediction,
int prediction_ldim,
const TensorDataType* __restrict__ ground_truth,
int ground_truth_ldim,
TensorDataType* __restrict__ contribution) {
// Indices
const int tid = threadIdx.x;
const int gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int bidy = blockIdx.y;
const int nthreadsx = blockDim.x * gridDim.x;
// Compute local contribution for each matrix column
for (int col = bidy; col < local_width; col += gridDim.y) {
// Compute contributions for each thread
TensorDataType private_contribution = TensorDataType(0.0);
for (int row = gidx; row < local_height; row += nthreadsx) {
const auto& x = prediction[row + col * prediction_ldim];
const auto& xhat = ground_truth[row + col * ground_truth_ldim];
private_contribution += cuda::abs(x - xhat);
}
// Shared memory reduction to get contribution for each block
/// @todo unroll loops
__shared__ TensorDataType shared_contribution[block_size];
shared_contribution[tid] = private_contribution;
for (int stride = block_size / 2; stride > 0; stride /= 2) {
__syncthreads();
if (tid < stride) {
shared_contribution[tid] += shared_contribution[tid + stride];
}
}
if (tid == 0) {
shared_contribution[0] /= global_height;
cuda::atomic_add(&contribution[col], shared_contribution[0]);
}
}
}
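// Illustrative note: local_fp_gpu below launches fp_kernel with grid_dims.y equal
// to the local matrix width, so each block column handles one column of the local
// matrix. A block sums |x - xhat| over its slice of rows, divides by the global
// height, and atomically accumulates into contribution[col]; this is why the
// contribution matrix is zeroed before the launch.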
template <typename TensorDataType>
void local_fp_gpu(El::Int height,
const El::AbstractMatrix<TensorDataType>& local_prediction,
const El::AbstractMatrix<TensorDataType>& local_ground_truth,
El::AbstractMatrix<TensorDataType>& local_contribution) {
El::Zero(local_contribution);
const auto& local_height = local_prediction.Height();
const auto& local_width = local_prediction.Width();
if (local_height > 0 && local_width > 0) {
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
CHECK_CUDA(hipSetDevice(hydrogen::gpu::DefaultDevice()));
hipLaunchKernelGGL(( fp_kernel<block_size>)
, dim3(grid_dims), dim3(block_dims), 0, hydrogen::cuda::GetDefaultStream(),
height, local_height, local_width,
local_prediction.LockedBuffer(), local_prediction.LDim(),
local_ground_truth.LockedBuffer(), local_ground_truth.LDim(),
local_contribution.Buffer());
}
}
template <int block_size, typename TensorDataType>
__global__ void bp_kernel(int global_height,
int local_height, int local_width,
const TensorDataType* __restrict__ prediction,
int prediction_ldim,
const TensorDataType* __restrict__ ground_truth,
int ground_truth_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
TensorDataType* __restrict__ gradient_wrt_prediction,
int gradient_wrt_prediction_ldim,
TensorDataType* __restrict__ gradient_wrt_ground_truth,
int gradient_wrt_ground_truth_ldim) {
// Indices
const int gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int bidy = blockIdx.y;
const int nthreadsx = blockDim.x * gridDim.x;
// Compute gradients
for (int col = bidy; col < local_width; col += gridDim.y) {
const auto& dy = gradient_wrt_output[col];
for (int row = gidx; row < local_height; row += nthreadsx) {
const auto& x = prediction[row + col * prediction_ldim];
const auto& xhat = ground_truth[row + col * ground_truth_ldim];
auto& dx = gradient_wrt_prediction[row + col * gradient_wrt_prediction_ldim];
auto& dxhat = gradient_wrt_ground_truth[row + col * gradient_wrt_ground_truth_ldim];
const TensorDataType global_height_dt = TensorDataType(global_height);
if (x > xhat) {
dx = dy / global_height_dt;
dxhat = -dy / global_height_dt;
} else if (x < xhat) {
dx = -dy / global_height_dt;
dxhat = dy / global_height_dt;
} else {
dx = TensorDataType(0.0);
dxhat = TensorDataType(0.0);
}
}
}
}
template <typename TensorDataType>
void local_bp_gpu(El::Int height,
const El::AbstractMatrix<TensorDataType>& local_prediction,
const El::AbstractMatrix<TensorDataType>& local_ground_truth,
const El::AbstractMatrix<TensorDataType>& local_gradient_wrt_output,
El::AbstractMatrix<TensorDataType>& local_gradient_wrt_prediction,
El::AbstractMatrix<TensorDataType>& local_gradient_wrt_ground_truth) {
const auto& local_height = local_prediction.Height();
const auto& local_width = local_prediction.Width();
if (local_height > 0 && local_width > 0) {
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
CHECK_CUDA(hipSetDevice(hydrogen::gpu::DefaultDevice()));
hipLaunchKernelGGL(( bp_kernel<block_size>)
, dim3(grid_dims), dim3(block_dims), 0, hydrogen::cuda::GetDefaultStream(),
height, local_height, local_width,
local_prediction.LockedBuffer(), local_prediction.LDim(),
local_ground_truth.LockedBuffer(), local_ground_truth.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_prediction.Buffer(),
local_gradient_wrt_prediction.LDim(),
local_gradient_wrt_ground_truth.Buffer(),
local_gradient_wrt_ground_truth.LDim());
}
}
} // namespace
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void mean_absolute_error_layer<TensorDataType, T_layout, Dev>::local_fp_compute() {
local_fp_gpu(this->get_input_size(),
this->get_local_prev_activations(0),
this->get_local_prev_activations(1),
this->m_workspace->Matrix());
}
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void mean_absolute_error_layer<TensorDataType, T_layout, Dev>::local_bp_compute() {
local_bp_gpu(this->get_input_size(),
this->get_local_prev_activations(0),
this->get_local_prev_activations(1),
this->m_workspace->LockedMatrix(),
this->get_local_error_signals(0),
this->get_local_error_signals(1));
}
#define PROTO(T) \
template class mean_absolute_error_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class mean_absolute_error_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| dfdb8168bb7ab5a77044be9a9ca69b7b6575f8df.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_MEAN_ABSOLUTE_ERROR_LAYER_INSTANTIATE
#include "lbann/layers/loss/mean_absolute_error.hpp"
namespace lbann {
namespace {
template <int block_size, typename TensorDataType>
__global__ void fp_kernel(int global_height,
int local_height, int local_width,
const TensorDataType* __restrict__ prediction,
int prediction_ldim,
const TensorDataType* __restrict__ ground_truth,
int ground_truth_ldim,
TensorDataType* __restrict__ contribution) {
// Indices
const int tid = threadIdx.x;
const int gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int bidy = blockIdx.y;
const int nthreadsx = blockDim.x * gridDim.x;
// Compute local contribution for each matrix column
for (int col = bidy; col < local_width; col += gridDim.y) {
// Compute contributions for each thread
TensorDataType private_contribution = TensorDataType(0.0);
for (int row = gidx; row < local_height; row += nthreadsx) {
const auto& x = prediction[row + col * prediction_ldim];
const auto& xhat = ground_truth[row + col * ground_truth_ldim];
private_contribution += cuda::abs(x - xhat);
}
// Shared memory reduction to get contribution for each block
/// @todo unroll loops
__shared__ TensorDataType shared_contribution[block_size];
shared_contribution[tid] = private_contribution;
for (int stride = block_size / 2; stride > 0; stride /= 2) {
__syncthreads();
if (tid < stride) {
shared_contribution[tid] += shared_contribution[tid + stride];
}
}
if (tid == 0) {
shared_contribution[0] /= global_height;
cuda::atomic_add(&contribution[col], shared_contribution[0]);
}
}
}
template <typename TensorDataType>
void local_fp_gpu(El::Int height,
const El::AbstractMatrix<TensorDataType>& local_prediction,
const El::AbstractMatrix<TensorDataType>& local_ground_truth,
El::AbstractMatrix<TensorDataType>& local_contribution) {
El::Zero(local_contribution);
const auto& local_height = local_prediction.Height();
const auto& local_width = local_prediction.Width();
if (local_height > 0 && local_width > 0) {
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
CHECK_CUDA(cudaSetDevice(hydrogen::gpu::DefaultDevice()));
fp_kernel<block_size>
<<<grid_dims, block_dims, 0, hydrogen::cuda::GetDefaultStream()>>>(
height, local_height, local_width,
local_prediction.LockedBuffer(), local_prediction.LDim(),
local_ground_truth.LockedBuffer(), local_ground_truth.LDim(),
local_contribution.Buffer());
}
}
template <int block_size, typename TensorDataType>
__global__ void bp_kernel(int global_height,
int local_height, int local_width,
const TensorDataType* __restrict__ prediction,
int prediction_ldim,
const TensorDataType* __restrict__ ground_truth,
int ground_truth_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
TensorDataType* __restrict__ gradient_wrt_prediction,
int gradient_wrt_prediction_ldim,
TensorDataType* __restrict__ gradient_wrt_ground_truth,
int gradient_wrt_ground_truth_ldim) {
// Indices
const int gidx = threadIdx.x + blockIdx.x * blockDim.x;
const int bidy = blockIdx.y;
const int nthreadsx = blockDim.x * gridDim.x;
// Compute gradients
for (int col = bidy; col < local_width; col += gridDim.y) {
const auto& dy = gradient_wrt_output[col];
for (int row = gidx; row < local_height; row += nthreadsx) {
const auto& x = prediction[row + col * prediction_ldim];
const auto& xhat = ground_truth[row + col * ground_truth_ldim];
auto& dx = gradient_wrt_prediction[row + col * gradient_wrt_prediction_ldim];
auto& dxhat = gradient_wrt_ground_truth[row + col * gradient_wrt_ground_truth_ldim];
const TensorDataType global_height_dt = TensorDataType(global_height);
if (x > xhat) {
dx = dy / global_height_dt;
dxhat = -dy / global_height_dt;
} else if (x < xhat) {
dx = -dy / global_height_dt;
dxhat = dy / global_height_dt;
} else {
dx = TensorDataType(0.0);
dxhat = TensorDataType(0.0);
}
}
}
}
template <typename TensorDataType>
void local_bp_gpu(El::Int height,
const El::AbstractMatrix<TensorDataType>& local_prediction,
const El::AbstractMatrix<TensorDataType>& local_ground_truth,
const El::AbstractMatrix<TensorDataType>& local_gradient_wrt_output,
El::AbstractMatrix<TensorDataType>& local_gradient_wrt_prediction,
El::AbstractMatrix<TensorDataType>& local_gradient_wrt_ground_truth) {
const auto& local_height = local_prediction.Height();
const auto& local_width = local_prediction.Width();
if (local_height > 0 && local_width > 0) {
const int block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
grid_dims.y = local_width;
CHECK_CUDA(cudaSetDevice(hydrogen::gpu::DefaultDevice()));
bp_kernel<block_size>
<<<grid_dims, block_dims, 0, hydrogen::cuda::GetDefaultStream()>>>(
height, local_height, local_width,
local_prediction.LockedBuffer(), local_prediction.LDim(),
local_ground_truth.LockedBuffer(), local_ground_truth.LDim(),
local_gradient_wrt_output.LockedBuffer(),
local_gradient_wrt_prediction.Buffer(),
local_gradient_wrt_prediction.LDim(),
local_gradient_wrt_ground_truth.Buffer(),
local_gradient_wrt_ground_truth.LDim());
}
}
} // namespace
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void mean_absolute_error_layer<TensorDataType, T_layout, Dev>::local_fp_compute() {
local_fp_gpu(this->get_input_size(),
this->get_local_prev_activations(0),
this->get_local_prev_activations(1),
this->m_workspace->Matrix());
}
template <typename TensorDataType, data_layout T_layout, El::Device Dev>
void mean_absolute_error_layer<TensorDataType, T_layout, Dev>::local_bp_compute() {
local_bp_gpu(this->get_input_size(),
this->get_local_prev_activations(0),
this->get_local_prev_activations(1),
this->m_workspace->LockedMatrix(),
this->get_local_error_signals(0),
this->get_local_error_signals(1));
}
#define PROTO(T) \
template class mean_absolute_error_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class mean_absolute_error_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
32eca85ff5b5480110ba8a30305c49715642791a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_sqrtf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
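// Illustrative note: main() below pads each matrix size up to a multiple of the
// block shape before deriving the grid, e.g. XSIZE = 1016 with BLOCKX = 24 is
// padded to 1032, giving gridBlock.x = 1032 / 24 = 43. Each configuration is
// warmed up with 10 launches and then timed over 1000 launches.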
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
    hipFree(0);
    hipLaunchKernelGGL((vec_sqrtf), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
    hipDeviceSynchronize();
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((vec_sqrtf), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
    }
    auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((vec_sqrtf), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
    }
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 32eca85ff5b5480110ba8a30305c49715642791a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_sqrtf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_sqrtf<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_sqrtf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_sqrtf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5f846303a5605acb93865b8f30d77087d7afd4a0.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018-2020, Michael P. Howard
// Copyright (c) 2021, Auburn University
// This file is part of the azplugins project, released under the Modified BSD License.
#include "PairPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for Hertz pair potential
template hipError_t compute_pair_potential<azplugins::detail::PairEvaluatorHertz>
(const pair_args_t& pair_args,
const typename azplugins::detail::PairEvaluatorHertz::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
| 5f846303a5605acb93865b8f30d77087d7afd4a0.cu | // Copyright (c) 2018-2020, Michael P. Howard
// Copyright (c) 2021, Auburn University
// This file is part of the azplugins project, released under the Modified BSD License.
#include "PairPotentials.cuh"
namespace azplugins
{
namespace gpu
{
//! Kernel driver for Hertz pair potential
template cudaError_t compute_pair_potential<azplugins::detail::PairEvaluatorHertz>
(const pair_args_t& pair_args,
const typename azplugins::detail::PairEvaluatorHertz::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
|
2025cb75c91fa6259a0e14e4d6c3b869ca7071bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2020 by Contributors
* \file multi_sum_sq.cu
* \brief vectorized sums of squares norm over multiple arrays operators
* \author Clement Fuji Tsang, Andrei Ivanov, Moises Hernandez, Shuai Zheng
*/
#include "./multi_sum_sq-inl.h"
#include <hipcub/hipcub.hpp>
#define ILP 4
#define BLOCK_LIMIT 320
#define ARRAY_LIMIT 110
namespace mxnet {
namespace op {
// Shamelessly gotten from:
// https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_apply.cuh
// https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_l2norm_kernel.cu
// https://github.com/NVIDIA/apex/blob/master/csrc/type_shim.h
const int chunk_size = 32768;
template <typename DType>
struct MultiSumSqKernelParam {
DType* addresses[ARRAY_LIMIT];
int sizes[ARRAY_LIMIT];
unsigned char block_to_tensor[BLOCK_LIMIT];
int block_to_chunk[BLOCK_LIMIT];
int max_chunks_per_tensor = -1;
};
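// Illustrative example (tensor sizes assumed for illustration): with
// chunk_size = 32768 and two input tensors of 70000 and 10000 elements, tensor 0
// spans chunks {0, 1, 2} and tensor 1 spans chunk {0}, so the host loop in
// MultiSumSqRun fills
//   block_to_tensor = {0, 0, 0, 1},  block_to_chunk = {0, 1, 2, 0}
// and launches 4 blocks, with max_chunks_per_tensor = 3 acting as the per-tensor
// stride into block_reductions.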
template<typename DType>
__device__ __forceinline__ DType ReduceBlockIntoLanes(DType* x,
DType val) {
int tid = threadIdx.x;
int block_size = blockDim.x;
if (block_size >= 64) {
x[tid] = val;
__syncthreads();
}
#pragma unroll
for (int i = (block_size >> 1); i >= 64; i >>= 1) {
if (tid < i)
x[tid] = x[tid] + x[tid+i];
__syncthreads();
}
DType final;
if (tid < 32) {
if (block_size >= 64)
final = x[tid] + x[tid+32];
else
final = val;
#pragma unroll
for (int i = 16; i >= 1; i >>= 1)
final = final + __shfl_down_sync(0xffffffff, final, i);
}
return final;
}
template<typename DType>
__global__ void MultiSumSqKernel(int chunk_size,
MultiSumSqKernelParam<DType> param,
float* block_reductions,
int start_tensor_id,
float scale) {
const int tensor_loc = param.block_to_tensor[blockIdx.x];
const int chunk_len = param.block_to_chunk[blockIdx.x] * chunk_size;
const int n = param.sizes[tensor_loc] - chunk_len;
const DType* x = param.addresses[tensor_loc] + chunk_len;
const auto i_max = n <= chunk_size ? n : chunk_size;
__shared__ float vals[512];
// Non-divergent exit condition for __syncthreads, not necessary here
float val = 0;
for (int i_start = 0;
i_start < i_max;
i_start += blockDim.x * ILP) {
int i = i_start + threadIdx.x;
#pragma unroll
for (int ii = 0; ii < ILP && i < i_max; ++ii, i += blockDim.x) {
auto incoming_val = static_cast<float>(x[i]);
if (scale != 1.0f) {
incoming_val *= scale;
}
val += incoming_val * incoming_val;
}
}
const float final = ReduceBlockIntoLanes(vals, val);
if (threadIdx.x == 0) {
block_reductions[(start_tensor_id + tensor_loc) * param.max_chunks_per_tensor +
param.block_to_chunk[blockIdx.x]] = final;
}
}
template<typename DType>
__global__ void GlobalReductionKernel(MultiSumSqKernelParam<DType> param,
float* block_reductions,
float* output) {
__shared__ float vals[512];
float* reductions_this_tensor = block_reductions + blockIdx.x * param.max_chunks_per_tensor;
float val = 0;
for (int i = threadIdx.x; i < param.max_chunks_per_tensor; i += blockDim.x)
val += reductions_this_tensor[i];
float final = ReduceBlockIntoLanes(vals, val);
if (threadIdx.x == 0)
output[blockIdx.x] = final;
}
template<>
size_t GetRequiredStorageMultiSumSq<gpu>(const std::vector<TBlob> &inputs,
int* param_max_chunks_per_tensor) {
// find max num of chunks in tensors
int max_chunks_per_tensor = -1;
for (size_t t = 0; t < inputs.size(); t++) {
int chunks_this_tensor = (inputs[t].shape_.Size() + chunk_size - 1) / chunk_size;
if (chunks_this_tensor > max_chunks_per_tensor)
max_chunks_per_tensor = chunks_this_tensor;
}
if (param_max_chunks_per_tensor != nullptr)
*param_max_chunks_per_tensor = max_chunks_per_tensor;
return inputs.size() * max_chunks_per_tensor * sizeof(float);
}
template<>
void MultiSumSqRun<gpu>(const std::vector<TBlob> &inputs, int n_inputs,
float *out_ptr, const OpContext &ctx, float scale) {
const int block_size = 512;
using namespace mxnet_op;
auto s = ctx.get_stream<gpu>();
auto stream = mshadow::Stream<gpu>::GetStream(s);
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MultiSumSqKernelParam<DType> param;
size_t workspace_size = GetRequiredStorageMultiSumSq<gpu>(inputs,
¶m.max_chunks_per_tensor);
Tensor<gpu, 1, char> workspace =
ctx.requested[multi_sum_sq::kTempSpace].get_space_typed<gpu, 1, char>(
Shape1(workspace_size), s);
Tensor<gpu, 1, float> block_reductions(reinterpret_cast<float*>(&workspace[0]),
Shape1(n_inputs * param.max_chunks_per_tensor), s);
CUDA_CALL(hipMemsetAsync(block_reductions.dptr_, 0,
n_inputs * param.max_chunks_per_tensor* sizeof(float),
stream));
int loc_block_info = 0; // position in param.block_to_tensor and param.block_to_chunck
int loc_tensor_info = 0; // position in param.sizes and param.addresses
int start_tensor_id = 0;
for (int t = 0; t < n_inputs; t++, loc_tensor_info++) { // array index in inputs
param.sizes[loc_tensor_info] = inputs[t].shape_.Size();
param.addresses[loc_tensor_info] = inputs[t].FlatTo2D<gpu, DType>(s).dptr_;
const int chunks_this_tensor = (inputs[t].shape_.Size() - 1) / chunk_size;
for (int chunk = 0; chunk <= chunks_this_tensor; ++chunk) { // array chunk index
param.block_to_tensor[loc_block_info] = loc_tensor_info;
param.block_to_chunk[loc_block_info] = chunk;
loc_block_info++;
const bool last_curr_chunk = chunk == chunks_this_tensor;
const bool tensors_full = last_curr_chunk && loc_tensor_info == (ARRAY_LIMIT-1);
const bool blocks_full = (loc_block_info == BLOCK_LIMIT);
const bool last_chunk = last_curr_chunk && t == n_inputs - 1;
if (!(tensors_full || blocks_full || last_chunk))
continue;
hipLaunchKernelGGL(( MultiSumSqKernel), dim3(loc_block_info), dim3(block_size), 0, stream,
chunk_size, param, block_reductions.dptr_, start_tensor_id, scale);
MSHADOW_CUDA_POST_KERNEL_CHECK(MultiSumSqKernel);
loc_block_info = 0;
if (last_curr_chunk) { // if you start from a new tensor
loc_tensor_info = -1;
start_tensor_id = t + 1;
} else { // if you start from the same tensor
param.sizes[0] = param.sizes[loc_tensor_info];
param.addresses[0] = param.addresses[loc_tensor_info];
loc_tensor_info = 0;
start_tensor_id = t;
}
}
}
// Global reduction
hipLaunchKernelGGL(( GlobalReductionKernel), dim3(n_inputs), dim3(block_size), 0, stream,
param, block_reductions.dptr_, out_ptr);
});
}
NNVM_REGISTER_OP(multi_sum_sq)
.set_attr<FCompute>("FCompute<gpu>", MultiSumSq<gpu>);
} // namespace op
} // namespace mxnet
| 2025cb75c91fa6259a0e14e4d6c3b869ca7071bd.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2020 by Contributors
* \file multi_sum_sq.cu
* \brief vectorized sums of squares norm over multiple arrays operators
* \author Clement Fuji Tsang, Andrei Ivanov, Moises Hernandez, Shuai Zheng
*/
#include "./multi_sum_sq-inl.h"
#include <cub/cub.cuh>
#define ILP 4
#define BLOCK_LIMIT 320
#define ARRAY_LIMIT 110
namespace mxnet {
namespace op {
// Shamelessly gotten from:
// https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_apply.cuh
// https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_l2norm_kernel.cu
// https://github.com/NVIDIA/apex/blob/master/csrc/type_shim.h
const int chunk_size = 32768;
template <typename DType>
struct MultiSumSqKernelParam {
DType* addresses[ARRAY_LIMIT];
int sizes[ARRAY_LIMIT];
unsigned char block_to_tensor[BLOCK_LIMIT];
int block_to_chunk[BLOCK_LIMIT];
int max_chunks_per_tensor = -1;
};
template<typename DType>
__device__ __forceinline__ DType ReduceBlockIntoLanes(DType* x,
DType val) {
int tid = threadIdx.x;
int block_size = blockDim.x;
if (block_size >= 64) {
x[tid] = val;
__syncthreads();
}
#pragma unroll
for (int i = (block_size >> 1); i >= 64; i >>= 1) {
if (tid < i)
x[tid] = x[tid] + x[tid+i];
__syncthreads();
}
DType final;
if (tid < 32) {
if (block_size >= 64)
final = x[tid] + x[tid+32];
else
final = val;
#pragma unroll
for (int i = 16; i >= 1; i >>= 1)
final = final + __shfl_down_sync(0xffffffff, final, i);
}
return final;
}
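// Illustrative note: ReduceBlockIntoLanes is a two-stage reduction -- a shared
// memory tree halves the active range down to 64 partial sums, then the first
// warp folds those to 32 values and finishes with __shfl_down_sync. Only lane 0
// holds the complete block sum, which is why both kernels below guard their use
// of the result with `if (threadIdx.x == 0)`.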
template<typename DType>
__global__ void MultiSumSqKernel(int chunk_size,
MultiSumSqKernelParam<DType> param,
float* block_reductions,
int start_tensor_id,
float scale) {
const int tensor_loc = param.block_to_tensor[blockIdx.x];
const int chunk_len = param.block_to_chunk[blockIdx.x] * chunk_size;
const int n = param.sizes[tensor_loc] - chunk_len;
const DType* x = param.addresses[tensor_loc] + chunk_len;
const auto i_max = n <= chunk_size ? n : chunk_size;
__shared__ float vals[512];
// Non-divergent exit condition for __syncthreads, not necessary here
float val = 0;
for (int i_start = 0;
i_start < i_max;
i_start += blockDim.x * ILP) {
int i = i_start + threadIdx.x;
#pragma unroll
for (int ii = 0; ii < ILP && i < i_max; ++ii, i += blockDim.x) {
auto incoming_val = static_cast<float>(x[i]);
if (scale != 1.0f) {
incoming_val *= scale;
}
val += incoming_val * incoming_val;
}
}
const float final = ReduceBlockIntoLanes(vals, val);
if (threadIdx.x == 0) {
block_reductions[(start_tensor_id + tensor_loc) * param.max_chunks_per_tensor +
param.block_to_chunk[blockIdx.x]] = final;
}
}
template<typename DType>
__global__ void GlobalReductionKernel(MultiSumSqKernelParam<DType> param,
float* block_reductions,
float* output) {
__shared__ float vals[512];
float* reductions_this_tensor = block_reductions + blockIdx.x * param.max_chunks_per_tensor;
float val = 0;
for (int i = threadIdx.x; i < param.max_chunks_per_tensor; i += blockDim.x)
val += reductions_this_tensor[i];
float final = ReduceBlockIntoLanes(vals, val);
if (threadIdx.x == 0)
output[blockIdx.x] = final;
}
template<>
size_t GetRequiredStorageMultiSumSq<gpu>(const std::vector<TBlob> &inputs,
int* param_max_chunks_per_tensor) {
// find max num of chunks in tensors
int max_chunks_per_tensor = -1;
for (size_t t = 0; t < inputs.size(); t++) {
int chunks_this_tensor = (inputs[t].shape_.Size() + chunk_size - 1) / chunk_size;
if (chunks_this_tensor > max_chunks_per_tensor)
max_chunks_per_tensor = chunks_this_tensor;
}
if (param_max_chunks_per_tensor != nullptr)
*param_max_chunks_per_tensor = max_chunks_per_tensor;
return inputs.size() * max_chunks_per_tensor * sizeof(float);
}
template<>
void MultiSumSqRun<gpu>(const std::vector<TBlob> &inputs, int n_inputs,
float *out_ptr, const OpContext &ctx, float scale) {
const int block_size = 512;
using namespace mxnet_op;
auto s = ctx.get_stream<gpu>();
auto stream = mshadow::Stream<gpu>::GetStream(s);
MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
MultiSumSqKernelParam<DType> param;
size_t workspace_size = GetRequiredStorageMultiSumSq<gpu>(inputs,
¶m.max_chunks_per_tensor);
Tensor<gpu, 1, char> workspace =
ctx.requested[multi_sum_sq::kTempSpace].get_space_typed<gpu, 1, char>(
Shape1(workspace_size), s);
Tensor<gpu, 1, float> block_reductions(reinterpret_cast<float*>(&workspace[0]),
Shape1(n_inputs * param.max_chunks_per_tensor), s);
CUDA_CALL(cudaMemsetAsync(block_reductions.dptr_, 0,
n_inputs * param.max_chunks_per_tensor* sizeof(float),
stream));
int loc_block_info = 0; // position in param.block_to_tensor and param.block_to_chunck
int loc_tensor_info = 0; // position in param.sizes and param.addresses
int start_tensor_id = 0;
for (int t = 0; t < n_inputs; t++, loc_tensor_info++) { // array index in inputs
param.sizes[loc_tensor_info] = inputs[t].shape_.Size();
param.addresses[loc_tensor_info] = inputs[t].FlatTo2D<gpu, DType>(s).dptr_;
const int chunks_this_tensor = (inputs[t].shape_.Size() - 1) / chunk_size;
for (int chunk = 0; chunk <= chunks_this_tensor; ++chunk) { // array chunk index
param.block_to_tensor[loc_block_info] = loc_tensor_info;
param.block_to_chunk[loc_block_info] = chunk;
loc_block_info++;
const bool last_curr_chunk = chunk == chunks_this_tensor;
const bool tensors_full = last_curr_chunk && loc_tensor_info == (ARRAY_LIMIT-1);
const bool blocks_full = (loc_block_info == BLOCK_LIMIT);
const bool last_chunk = last_curr_chunk && t == n_inputs - 1;
if (!(tensors_full || blocks_full || last_chunk))
continue;
MultiSumSqKernel<<<loc_block_info, block_size, 0, stream>>>
(chunk_size, param, block_reductions.dptr_, start_tensor_id, scale);
MSHADOW_CUDA_POST_KERNEL_CHECK(MultiSumSqKernel);
loc_block_info = 0;
if (last_curr_chunk) { // if you start from a new tensor
loc_tensor_info = -1;
start_tensor_id = t + 1;
} else { // if you start from the same tensor
param.sizes[0] = param.sizes[loc_tensor_info];
param.addresses[0] = param.addresses[loc_tensor_info];
loc_tensor_info = 0;
start_tensor_id = t;
}
}
}
// Global reduction
GlobalReductionKernel<<<n_inputs, block_size, 0, stream>>>
(param, block_reductions.dptr_, out_ptr);
});
}
NNVM_REGISTER_OP(multi_sum_sq)
.set_attr<FCompute>("FCompute<gpu>", MultiSumSq<gpu>);
} // namespace op
} // namespace mxnet
|
4e9778e8488b1c26f753736de6024ee46c34e81c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/cuda/warp_perspective/common.h"
#include "src/cuda/utils.cuh"
#include "src/cuda/warp_perspective/common.cuh"
namespace megdnn {
namespace cuda {
namespace warp_perspective {
const int factor = 4;
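// Illustrative note: to reduce atomicAdd contention, each thread scatters its
// gradient into one of `factor` (= 4) interleaved copies of dst, selected by
// i = ow & (factor - 1); add_up_kernel then folds the copies together, which is
// why get_backward_data_workspace_in_bytes reserves N * C * IH * IW * factor floats.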
template <typename Getter, int factor>
__global__ void warp_perspective_bwd_data_kernel(
const float* hidden, const float* mat, const int* midx, float* dst, int N,
int C, int IH, int IW, int OH, int OW) {
Getter getter;
int n = blockIdx.z;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
hidden += n * C * OH * OW;
if (midx) {
dst += midx[n] * C * factor * IH * IW;
} else {
dst += n * C * factor * IH * IW;
}
mat += n * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
int i = ow & (factor - 1);
for (int c = 0; c < C; ++c) {
atomicAdd(
dst + ih0 * IW + iw0 + i * IH * IW,
hidden[oh * OW + ow] * nalpha * nbeta);
atomicAdd(
dst + ih0 * IW + iw1 + i * IH * IW,
hidden[oh * OW + ow] * nalpha * pbeta);
atomicAdd(
dst + ih1 * IW + iw0 + i * IH * IW,
hidden[oh * OW + ow] * palpha * nbeta);
atomicAdd(
dst + ih1 * IW + iw1 + i * IH * IW,
hidden[oh * OW + ow] * palpha * pbeta);
hidden += OH * OW;
dst += factor * IH * IW;
}
}
}
template <int factor>
__global__ void add_up_kernel(const float* src, float* dst, int IP) {
int nc = blockIdx.y;
int ip = blockIdx.x * blockDim.x + threadIdx.x;
src += nc * IP * factor;
dst += nc * IP;
if (ip < IP) {
dst[ip] = src[ip];
#pragma unroll
for (int i = 1; i < factor; ++i)
dst[ip] += src[ip + i * IP];
}
}
template <int factor>
__global__ void warp_perspective_bwd_data_constant_kernel(
const float* hidden, const float* mat, const int* midx, float* dst, int N,
int C, int IH, int IW, int OH, int OW) {
int n = blockIdx.z;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
hidden += n * C * OH * OW;
if (midx) {
dst += midx[n] * C * factor * IH * IW;
} else {
dst += n * C * factor * IH * IW;
}
mat += n * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
int i = ow & (factor - 1);
if (isfinite(ih) && isfinite(iw)) {
for (int c = 0; c < C; ++c) {
if (okh0 && okw0)
atomicAdd(
dst + ih0 * IW + iw0 + i * IH * IW,
hidden[oh * OW + ow] * nalpha * nbeta);
if (okh0 && okw1)
atomicAdd(
dst + ih0 * IW + iw1 + i * IH * IW,
hidden[oh * OW + ow] * nalpha * pbeta);
if (okh1 && okw0)
atomicAdd(
dst + ih1 * IW + iw0 + i * IH * IW,
hidden[oh * OW + ow] * palpha * nbeta);
if (okh1 && okw1)
atomicAdd(
dst + ih1 * IW + iw1 + i * IH * IW,
hidden[oh * OW + ow] * palpha * pbeta);
hidden += OH * OW;
dst += factor * IH * IW;
}
}
}
}
size_t get_backward_data_workspace_in_bytes(
int N, int C, int IH, int IW, int /* OH */, int /* OW */,
BorderMode /* bmode */) {
return N * C * IH * IW * factor * sizeof(float);
}
void backward_data_proxy(
const float* mat, const int* midx, const float* diff, float* grad,
float* workspace, int N, int N_SRC, int C, int IH, int IW, int OH, int OW,
float bval, BorderMode mode, hipStream_t stream) {
(void)bval;
(void)grad;
const int BY = 16, BX = 32;
{
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, N);
if (midx) {
cuda_check(hipMemsetAsync(
workspace, 0, sizeof(float) * factor * N_SRC * C * IH * IW,
stream));
} else {
cuda_check(hipMemsetAsync(
workspace, 0, sizeof(float) * factor * N * C * IH * IW, stream));
}
#define DISPATCH(Getter) \
hipLaunchKernelGGL(( warp_perspective_bwd_data_kernel<Getter, factor>), dim3(blocks), dim3(threads), 0, stream, \
diff, mat, midx, workspace, N, C, IH, IW, OH, OW);
switch (mode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
case BORDER_CONSTANT:
hipLaunchKernelGGL(( warp_perspective_bwd_data_constant_kernel<factor>)
, dim3(blocks), dim3(threads), 0, stream,
diff, mat, midx, workspace, N, C, IH, IW, OH, OW);
break;
default:
break;
}
#undef DISPATCH
}
{
int THREADS = 512;
dim3 threads(THREADS);
if (midx) {
dim3 blocks((IH * IW + THREADS - 1) / THREADS, N_SRC * C);
hipLaunchKernelGGL(( add_up_kernel<factor>)
, dim3(blocks), dim3(threads), 0, stream, workspace, grad, IH * IW);
} else {
dim3 blocks((IH * IW + THREADS - 1) / THREADS, N * C);
hipLaunchKernelGGL(( add_up_kernel<factor>)
, dim3(blocks), dim3(threads), 0, stream, workspace, grad, IH * IW);
}
}
after_kernel_launch();
}
} // namespace warp_perspective
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
| 4e9778e8488b1c26f753736de6024ee46c34e81c.cu | #include "src/cuda/warp_perspective/common.h"
#include "src/cuda/utils.cuh"
#include "src/cuda/warp_perspective/common.cuh"
namespace megdnn {
namespace cuda {
namespace warp_perspective {
const int factor = 4;
template <typename Getter, int factor>
__global__ void warp_perspective_bwd_data_kernel(
const float* hidden, const float* mat, const int* midx, float* dst, int N,
int C, int IH, int IW, int OH, int OW) {
Getter getter;
int n = blockIdx.z;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
hidden += n * C * OH * OW;
if (midx) {
dst += midx[n] * C * factor * IH * IW;
} else {
dst += n * C * factor * IH * IW;
}
mat += n * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
int i = ow & (factor - 1);
for (int c = 0; c < C; ++c) {
atomicAdd(
dst + ih0 * IW + iw0 + i * IH * IW,
hidden[oh * OW + ow] * nalpha * nbeta);
atomicAdd(
dst + ih0 * IW + iw1 + i * IH * IW,
hidden[oh * OW + ow] * nalpha * pbeta);
atomicAdd(
dst + ih1 * IW + iw0 + i * IH * IW,
hidden[oh * OW + ow] * palpha * nbeta);
atomicAdd(
dst + ih1 * IW + iw1 + i * IH * IW,
hidden[oh * OW + ow] * palpha * pbeta);
hidden += OH * OW;
dst += factor * IH * IW;
}
}
}
template <int factor>
__global__ void add_up_kernel(const float* src, float* dst, int IP) {
int nc = blockIdx.y;
int ip = blockIdx.x * blockDim.x + threadIdx.x;
src += nc * IP * factor;
dst += nc * IP;
if (ip < IP) {
dst[ip] = src[ip];
#pragma unroll
for (int i = 1; i < factor; ++i)
dst[ip] += src[ip + i * IP];
}
}
template <int factor>
__global__ void warp_perspective_bwd_data_constant_kernel(
const float* hidden, const float* mat, const int* midx, float* dst, int N,
int C, int IH, int IW, int OH, int OW) {
int n = blockIdx.z;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
hidden += n * C * OH * OW;
if (midx) {
dst += midx[n] * C * factor * IH * IW;
} else {
dst += n * C * factor * IH * IW;
}
mat += n * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
int i = ow & (factor - 1);
if (isfinite(ih) && isfinite(iw)) {
for (int c = 0; c < C; ++c) {
if (okh0 && okw0)
atomicAdd(
dst + ih0 * IW + iw0 + i * IH * IW,
hidden[oh * OW + ow] * nalpha * nbeta);
if (okh0 && okw1)
atomicAdd(
dst + ih0 * IW + iw1 + i * IH * IW,
hidden[oh * OW + ow] * nalpha * pbeta);
if (okh1 && okw0)
atomicAdd(
dst + ih1 * IW + iw0 + i * IH * IW,
hidden[oh * OW + ow] * palpha * nbeta);
if (okh1 && okw1)
atomicAdd(
dst + ih1 * IW + iw1 + i * IH * IW,
hidden[oh * OW + ow] * palpha * pbeta);
hidden += OH * OW;
dst += factor * IH * IW;
}
}
}
}
size_t get_backward_data_workspace_in_bytes(
int N, int C, int IH, int IW, int /* OH */, int /* OW */,
BorderMode /* bmode */) {
return N * C * IH * IW * factor * sizeof(float);
}
void backward_data_proxy(
const float* mat, const int* midx, const float* diff, float* grad,
float* workspace, int N, int N_SRC, int C, int IH, int IW, int OH, int OW,
float bval, BorderMode mode, cudaStream_t stream) {
(void)bval;
(void)grad;
const int BY = 16, BX = 32;
{
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, N);
if (midx) {
cuda_check(cudaMemsetAsync(
workspace, 0, sizeof(float) * factor * N_SRC * C * IH * IW,
stream));
} else {
cuda_check(cudaMemsetAsync(
workspace, 0, sizeof(float) * factor * N * C * IH * IW, stream));
}
#define DISPATCH(Getter) \
warp_perspective_bwd_data_kernel<Getter, factor><<<blocks, threads, 0, stream>>>( \
diff, mat, midx, workspace, N, C, IH, IW, OH, OW);
switch (mode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
case BORDER_CONSTANT:
warp_perspective_bwd_data_constant_kernel<factor>
<<<blocks, threads, 0, stream>>>(
diff, mat, midx, workspace, N, C, IH, IW, OH, OW);
break;
default:
break;
}
#undef DISPATCH
}
{
int THREADS = 512;
dim3 threads(THREADS);
if (midx) {
dim3 blocks((IH * IW + THREADS - 1) / THREADS, N_SRC * C);
add_up_kernel<factor>
<<<blocks, threads, 0, stream>>>(workspace, grad, IH * IW);
} else {
dim3 blocks((IH * IW + THREADS - 1) / THREADS, N * C);
add_up_kernel<factor>
<<<blocks, threads, 0, stream>>>(workspace, grad, IH * IW);
}
}
after_kernel_launch();
}
} // namespace warp_perspective
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
500b4b90f28290c603dd3ff5667f5b1c66e220e8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h> //required for CUDA
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <limits.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <cassert>
#include <stdlib.h>
#include <string>
#include <sstream>
#include <vector>
using namespace std;
#define MAX_N_TERMS 10
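// Illustrative example (hypothetical input, values chosen only for illustration):
// for f(x,y) = 2*x^2*y + 3*y^3 integrated over x in [0,1], y in [0,2], the input
// file has four lines and the flattened 'degrees' buffer consumed by the kernels
// (dimension = 3 once main() prepends the coefficient column) would look like
//   float degrees_example[4 * 3] = {
//       0.f, 0.f, 0.f,   // line 0: lower limits per dimension (first slot unused)
//       0.f, 1.f, 2.f,   // line 1: upper limits per dimension (first slot unused)
//       2.f, 2.f, 1.f,   // term 0: coefficient 2, powers x^2 * y^1
//       3.f, 0.f, 3.f    // term 1: coefficient 3, powers x^0 * y^3
//   };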
__global__ void MC_Integratev1(float* degrees,int dimension,int n_terms,float* I_val,hiprandState_t *states, long int seed,int thread_max_iterations)
{
//Get the Global ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
float x;
float I = 0.0;
float f[MAX_N_TERMS];
//float* f =new float[n_terms];
//Initialize the random number generator
hiprand_init(seed, id, 0, &states[id]);
for (int iter_count=0;iter_count< thread_max_iterations;iter_count++)
{
//Initialize f with the coefficients
for (int term_i=0;term_i<n_terms;term_i++)
{
f[term_i]=degrees[(2+term_i)*dimension];
}
for (int d=1;d<dimension;d++)
{
//Generate a random number in the range of the limits of this dimension
x = hiprand_uniform (&states[id]); //x between 0 and 1
//Generate dimension sample based on the limits of the dimension
x = x*(degrees[1*dimension+d]-degrees[0*dimension+d])+degrees[0*dimension+d];
for (int term_i=0;term_i<n_terms;term_i++)
{
//Multiply f of this term by x^(power of this dimension in this term)
f[term_i]*=pow(x,degrees[(2+term_i)*dimension+d]);
}
}
//Add the evaluation to the private summation
for (int term_i=0;term_i<n_terms;term_i++)
{
I+=f[term_i];
}
}
//Add the private summation to the global summation
atomicAdd(I_val,I);
}
__global__ void MC_Integratev2(float* degrees_g,int dimension,int n_terms,float* I_val, long int seed,int thread_max_iterations)
{
//Get the global and local ids
int id = blockIdx.x*blockDim.x+threadIdx.x;
int lid=threadIdx.x;
float x;
float I = 0.0;
float f[MAX_N_TERMS];
//float* f =new float[n_terms];
//Dynamically allocate shared memory for 'degrees' and 'I_shared'
extern __shared__ float shared_mem[];
float* I_shared = shared_mem;
I_shared[0]=0;
float* degrees = &shared_mem[1];
//Initialize the local copy of 'degrees' for the shared copy
if(lid<(2+n_terms)*dimension)
{
//copy one element of degrees
degrees[lid]=degrees_g[lid];
}
// Create a state in private memory
hiprandState_t state;
//Initialize the random number generator
hiprand_init(seed,id,0,&state);
//Synchronize all threads to assure that 'degrees' is initialized
__syncthreads();
for (int iter_count=0;iter_count< thread_max_iterations;iter_count++)
{
//Initialize f with the coefficients
for (int term_i=0;term_i<n_terms;term_i++)
{
f[term_i]=degrees[(2+term_i)*dimension];
}
for (int d=1;d<dimension;d++)
{
//Generate a random number in the range of the limits of this dimension
x = hiprand_uniform (&state); //x between 0 and 1
//Generate dimension sample based on the limits of the dimension
x = x*(degrees[1*dimension+d]-degrees[0*dimension+d])+degrees[0*dimension+d];
for (int term_i=0;term_i<n_terms;term_i++)
{
//Multiply f of this term by x^(power of this dimension in this term)
f[term_i]*=pow(x,degrees[(2+term_i)*dimension+d]);
}
}
//Add the evaluation to the private summation
for (int term_i=0;term_i<n_terms;term_i++)
{
I+=f[term_i];
}
}
//Add the private summation to the shared summation
atomicAdd(I_shared,I);
//Synchronize all the threads to assure they all added their private summations to the shared summation
__syncthreads();
//Thread 0 in the block add the shared summation to the global summation
if(lid==0)
{
atomicAdd(I_val,*I_shared);
}
}
int main(int argc, char** argv)
{
//----------------------------------
// Parse Command Line
//----------------------------------
if (argc < 8)
{
std::cerr << "Required Command-Line Arguments Are:\n";
std::cerr << "Text file name\n";
std::cerr << "Method (1 or 2)\n";
std::cerr << "Dimension \n";
std::cerr << "Number of Blocks \n";
std::cerr << "Number of Threads per Block \n";
std::cerr << "Number of iterations in a thread \n";
std::cerr << "Validation (1 to validate, 2 validate and show polynomial) \n";
return -1;
}
string filename=argv[1];
int Method = atol(argv[2]);
int dimension = atol(argv[3]);
long long int N_blocks = atol(argv[4]);
int N_threads = atol(argv[5]);
int thread_max_iterations=atol(argv[6]);
int Validate=atol(argv[7]);
long int max_evaluations = N_blocks*N_threads*thread_max_iterations;
//----------------------------------
// Read The file into an array (degrees)
//----------------------------------
//Each line in the file represent a term in the polynomial where the first number is the coefficient
//and the following numbers are the powers of the variables of each dimension in order
//Line 0: Lower limits for each dimension
//Line 1: Upper limits for each dimension
//Accordingly the first element (coefficient) in the first two lines are ignored
//Add one to the dimension as the first element in every line is the coefficient (first dimension is 1)
dimension++;
string line,number;
ifstream myfile (filename.c_str());
float temp=0;
std::vector<float> degrees (0);
int line_c=0;
if (myfile.is_open())
{
while ( getline (myfile,line) )
{
std::stringstream linestream(line);
int number_c=0;
degrees.resize((line_c+1)*dimension);
while ( getline (linestream,number,' ' ) )
{
stringstream ss;
ss<<number;
ss>>temp;
degrees[line_c*dimension+number_c]=temp;
number_c++;
}
assert(number_c==dimension);
line_c++;
}
//First two lines are the limits and we need at least one term in the polynomial
assert(line_c>2);
myfile.close();
}
else cout << "Unable to open file";
//n_terms: Number of terms in the polynomial (first two lines are the limits)
int n_terms=line_c-2;
if(n_terms>MAX_N_TERMS)
{
std::cerr<<"The Maximum Number of terms defined in the code is "<<MAX_N_TERMS<<std::endl;
return -1;
}
//----------------------------------
//Display the numbers in the file (same format as the file)
//----------------------------------
if(Validate==2)
{
std::cout<<"-----------------------------"<<std::endl;
std::cout<<"Upper Limit for dimensions = ";
for(int j=1;j<dimension;j++)
{
std::cout<<degrees[j]<<" ";
}
std::cout<<std::endl;
std::cout<<"Lower Limit for dimensions = ";
for(int j=1;j<dimension;j++)
{
std::cout<<degrees[dimension+j]<<" ";
}
std::cout<<std::endl;
for (int i=2;i<line_c;i++)
{
std::cout<<"Term "<<i-2<<" Coefficient = ";
for(int j=0;j<dimension;j++)
{
if(j==0)
{
std::cout<<degrees[i*dimension+j]<<", Powers = ";
}
else
{
std::cout<<degrees[i*dimension+j]<<" ";
}
}
std::cout<<std::endl;
}
std::cout<<"-----------------------------"<<std::endl;
}
//----------------------------------
//Calculate the Analytical solution
//----------------------------------
double Ianalytical=0;
for (int term_i=0;term_i<n_terms;term_i++)
{
double a,b,I;
//Initialize by the coefficient
I=degrees[(2+term_i)*dimension];
for (int d=1;d<dimension;d++)
{
a= degrees[0*dimension+d];
b= degrees[1*dimension+d];
b=pow(b,degrees[(2+term_i)*dimension+d]+1);
a=pow(a,degrees[(2+term_i)*dimension+d]+1);
I*=(b-a)/(double)(degrees[(2+term_i)*dimension+d]+1);
}
Ianalytical+=I;
}
std::cout<<"Analytical Solution = "<< Ianalytical <<std::endl;
std::cout<<"-----------------------------"<<std::endl;
//************************//
// PARALLEL RUN //
//***********************//
std::cout<<"Parallel Case using method "<<Method<<": "<<std::endl;
std::cout<< "Number of blocks = " << N_blocks <<std::endl;
std::cout <<"Number of threads per block = " << N_threads<<std::endl;
std::cout<< "Number of Iterations per thread = " << thread_max_iterations << std::endl;
std::cout<<"Total number of Evaluations = "<<max_evaluations<<std::endl;
std::cout<<"Dimension = "<<dimension-1<<std::endl;
std::cout<<"-----------------------------"<<std::endl;
//---------------------------------------
//Initial Setup (Check the Block and grid sizes)
//---------------------------------------
//Get Device properties
hipDeviceProp_t device_properties;
hipGetDeviceProperties (&device_properties, 0);
if (N_threads>device_properties.maxThreadsDim[0])
{
std::cerr << "Maximum threads for dimension 0 = " << device_properties.maxThreadsDim[0] << std::endl;
return -1;
}
if(N_blocks>device_properties.maxGridSize[0])
{
std::cerr << "Maximum grid dimension 0 = " << device_properties.maxGridSize[0] << std::endl;
return -1;
}
//---------------------------------------
// Setup Profiling
//---------------------------------------
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//I_val: Final Estimated Value of the Integral
float I_val=0;
//Pointers to data on the device
float *devDegrees;
float *dev_I_val;
hiprandState_t *devStates;
//seed the host random number generator to get a random seed for the Curand
srand(clock());
//Random seed to be used for Curand
long int seed = rand();
hipError_t err;
//Allocate memory for A,B,C on device
//Pass the address of a pointer where the malloc function will write the address of the data in it
//it have to be casted to a void pointer
if (Method!=2)
{err = hipMalloc( (void **)&devStates, N_blocks*N_threads * sizeof(hiprandState_t) );assert(err == hipSuccess);}
err = hipMalloc((void**)&devDegrees,degrees.size()*sizeof(float));assert(err == hipSuccess);
err = hipMalloc((void**)&dev_I_val,sizeof(float));assert(err == hipSuccess);
//Copy the data to the device
// CudaMemcpy(TO_ADDRESS, FROM_ADDRESS, NUMBER_OF_BYTES, DIRECTION)
//Where the direction is either hipMemcpyHostToDevice or hipMemcpyDeviceToHost
	err = hipMemcpy( devDegrees,&degrees[0],degrees.size()*sizeof(float),hipMemcpyHostToDevice);assert(err == hipSuccess);
err = hipMemcpy( dev_I_val,&I_val,sizeof(float),hipMemcpyHostToDevice);assert(err == hipSuccess);
//RUN THE KERNEL
if(Method==1)
{
hipLaunchKernelGGL(( MC_Integratev1), dim3(N_blocks),dim3(N_threads), 0, 0, devDegrees,dimension,n_terms,dev_I_val,devStates,seed,thread_max_iterations);
}
else if (Method ==2)
{
hipLaunchKernelGGL(( MC_Integratev2), dim3(N_blocks),dim3(N_threads),(1+(2+n_terms)*dimension)*sizeof(float), 0, devDegrees,dimension,n_terms,dev_I_val,seed,thread_max_iterations);
}
else
{
std::cerr<<"Please enter a valid method"<<std::endl;
hipFree(devDegrees);
hipFree(dev_I_val);
return -1;
}
//Copy the result to the Host
err =hipMemcpy(&I_val,dev_I_val,sizeof(float),hipMemcpyDeviceToHost);assert(err == hipSuccess);
//FREE MEMORY ON DEVICE
hipFree(devDegrees);
hipFree(dev_I_val);
if (Method!=2)
{hipFree(devStates);}
//Multiply by the Volume
float a,b;
for (int d=1;d<dimension;d++)
{
a= degrees[0*dimension+d];
b= degrees[1*dimension+d];
I_val*=(b-a);
}
//Divide by the total number of evaluations
I_val/=(float)N_blocks;
I_val/=(float)N_threads;
I_val/=(float)thread_max_iterations;
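	//I_val is now Volume * (sum of f over all samples) / N_total, i.e. the
	//standard Monte Carlo estimate V*<f> of the integral.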
//---------------------------------------
// Stop Profiling
//---------------------------------------
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float gpu_time;
hipEventElapsedTime(&gpu_time, start, stop); //time in milliseconds
gpu_time /= 1000.0;
std::cout<<"GPU Results: "<<std::endl;
std::cout <<"I = " << I_val << ", GPU time = "<<gpu_time<<std::endl;
//******************//
// SERIAL RUN //
//*****************//
if (Validate==1||Validate==2)
{
std::cout<<"-----------------------------"<<std::endl;
std::cout<<"Host Results: "<<std::endl;
double t_start_cpu = (double)clock()/(double)CLOCKS_PER_SEC;
//Set f_0_s to hold the coefficients of the polynomial terms
std::vector<double> f_0_s (n_terms,0);
for (int term_i=0;term_i<n_terms;term_i++)
{
f_0_s[term_i]=degrees[(2+term_i)*dimension];
}
srand(clock()); //seed the random number generator
long int N = 0;
double x;
double I = 0.0;
double a,b;
std::vector<double> f (n_terms,0);
do
{
//Initialize f with the coefficients
f=f_0_s;
for (int d=1;d<dimension;d++)
{
//Generate a random number in the range of the limits of this dimension
x = (double)rand()/(double)RAND_MAX; //x between 0 and 1
//limits
a= degrees[0*dimension+d];
b= degrees[1*dimension+d];
			x = x*(b-a) + a; //x between a and b for this dimension
for (int term_i=0;term_i<n_terms;term_i++)
{
//2: first 2 lines are the limits
f[term_i]*=pow(x,degrees[(2+term_i)*dimension+d]);
}
}
for (int term_i=0;term_i<n_terms;term_i++)
{
I+=f[term_i];
}
N++;
}
while (N <= max_evaluations);
//Multiply by the Volume
for (int d=1;d<dimension;d++)
{
a= degrees[0*dimension+d];
b= degrees[1*dimension+d];
I*=(b-a);
}
I/=(double)N;
double t_stop_cpu = (double)clock()/(double)CLOCKS_PER_SEC;
double cpu_time=t_stop_cpu-t_start_cpu;
std::cout <<"I = " << I << ", Host time = "<<cpu_time<<std::endl;
std::cout<<"Speed up = "<<cpu_time/gpu_time<<std::endl;
}
}
| 500b4b90f28290c603dd3ff5667f5b1c66e220e8.cu | #include <cuda.h> //required for CUDA
#include <curand_kernel.h>
#include <time.h>
#include <limits.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <cassert>
#include <stdlib.h>
#include <string>
#include <sstream>
#include <vector>
using namespace std;
#define MAX_N_TERMS 10
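//Method 1: every thread keeps its own curandState in global memory, draws
//thread_max_iterations random points, evaluates each polynomial term at the
//sample, accumulates the values privately and then adds its partial sum to the
//global accumulator with a single atomicAdd.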
__global__ void MC_Integratev1(float* degrees,int dimension,int n_terms,float* I_val,curandState *states, long int seed,int thread_max_iterations)
{
//Get the Global ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
float x;
float I = 0.0;
float f[MAX_N_TERMS];
//float* f =new float[n_terms];
//Initialize the random number generator
curand_init(seed, id, 0, &states[id]);
for (int iter_count=0;iter_count< thread_max_iterations;iter_count++)
{
//Initialize f with the coefficients
for (int term_i=0;term_i<n_terms;term_i++)
{
f[term_i]=degrees[(2+term_i)*dimension];
}
for (int d=1;d<dimension;d++)
{
//Generate a random number in the range of the limits of this dimension
x = curand_uniform (&states[id]); //x between 0 and 1
//Generate dimension sample based on the limits of the dimension
x = x*(degrees[1*dimension+d]-degrees[0*dimension+d])+degrees[0*dimension+d];
for (int term_i=0;term_i<n_terms;term_i++)
{
//Multiply f of this term by x^(power of this dimension in this term)
f[term_i]*=pow(x,degrees[(2+term_i)*dimension+d]);
}
}
//Add the evaluation to the private summation
for (int term_i=0;term_i<n_terms;term_i++)
{
I+=f[term_i];
}
}
//Add the private summation to the global summation
atomicAdd(I_val,I);
}
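//Method 2: 'degrees' is staged into dynamically sized shared memory and the
//per-thread sums are first combined into a per-block shared accumulator, so
//only one global atomicAdd is issued per block.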
__global__ void MC_Integratev2(float* degrees_g,int dimension,int n_terms,float* I_val, long int seed,int thread_max_iterations)
{
//Get the global and local ids
int id = blockIdx.x*blockDim.x+threadIdx.x;
int lid=threadIdx.x;
float x;
float I = 0.0;
float f[MAX_N_TERMS];
//float* f =new float[n_terms];
//Dynamically allocate shared memory for 'degrees' and 'I_shared'
extern __shared__ float shared_mem[];
float* I_shared = shared_mem;
I_shared[0]=0;
float* degrees = &shared_mem[1];
//Initialize the local copy of 'degrees' for the shared copy
if(lid<(2+n_terms)*dimension)
{
//copy one element of degrees
degrees[lid]=degrees_g[lid];
}
// Create a state in private memory
curandState state;
//Initialize the random number generator
curand_init(seed,id,0,&state);
//Synchronize all threads to assure that 'degrees' is initialized
__syncthreads();
for (int iter_count=0;iter_count< thread_max_iterations;iter_count++)
{
//Initialize f with the coefficients
for (int term_i=0;term_i<n_terms;term_i++)
{
f[term_i]=degrees[(2+term_i)*dimension];
}
for (int d=1;d<dimension;d++)
{
//Generate a random number in the range of the limits of this dimension
x = curand_uniform (&state); //x between 0 and 1
//Generate dimension sample based on the limits of the dimension
x = x*(degrees[1*dimension+d]-degrees[0*dimension+d])+degrees[0*dimension+d];
for (int term_i=0;term_i<n_terms;term_i++)
{
//Multiply f of this term by x^(power of this dimension in this term)
f[term_i]*=pow(x,degrees[(2+term_i)*dimension+d]);
}
}
//Add the evaluation to the private summation
for (int term_i=0;term_i<n_terms;term_i++)
{
I+=f[term_i];
}
}
//Add the private summation to the shared summation
atomicAdd(I_shared,I);
//Synchronize all the threads to assure they all added their private summations to the shared summation
__syncthreads();
//Thread 0 in the block add the shared summation to the global summation
if(lid==0)
{
atomicAdd(I_val,*I_shared);
}
}
int main(int argc, char** argv)
{
//----------------------------------
// Parse Command Line
//----------------------------------
if (argc < 8)
{
std::cerr << "Required Command-Line Arguments Are:\n";
std::cerr << "Text file name\n";
std::cerr << "Method (1 or 2)\n";
std::cerr << "Dimension \n";
std::cerr << "Number of Blocks \n";
std::cerr << "Number of Threads per Block \n";
std::cerr << "Number of iterations in a thread \n";
std::cerr << "Validation (1 to validate, 2 validate and show polynomial) \n";
return -1;
}
string filename=argv[1];
int Method = atol(argv[2]);
int dimension = atol(argv[3]);
long long int N_blocks = atol(argv[4]);
int N_threads = atol(argv[5]);
int thread_max_iterations=atol(argv[6]);
int Validate=atol(argv[7]);
long int max_evaluations = N_blocks*N_threads*thread_max_iterations;
//----------------------------------
// Read The file into an array (degrees)
//----------------------------------
//Each line in the file represent a term in the polynomial where the first number is the coefficient
//and the following numbers are the powers of the variables of each dimension in order
//Line 0: Lower limits for each dimension
//Line 1: Upper limits for each dimension
//Accordingly the first element (coefficient) in the first two lines is ignored
//Add one to the dimension as the first element in every line is the coefficient (first dimension is 1)
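//Hypothetical example (values invented for illustration): to integrate
//2*x1^2*x2 + 3*x2^3 over [0,1] x [0,2] the file would read
//  0 0 0   <- line 0: lower limits (first entry is a placeholder)
//  0 1 2   <- line 1: upper limits (first entry is a placeholder)
//  2 2 1   <- term 0: coefficient 2, powers 2 and 1
//  3 0 3   <- term 1: coefficient 3, powers 0 and 3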
dimension++;
string line,number;
ifstream myfile (filename.c_str());
float temp=0;
std::vector<float> degrees (0);
int line_c=0;
if (myfile.is_open())
{
while ( getline (myfile,line) )
{
std::stringstream linestream(line);
int number_c=0;
degrees.resize((line_c+1)*dimension);
while ( getline (linestream,number,' ' ) )
{
stringstream ss;
ss<<number;
ss>>temp;
degrees[line_c*dimension+number_c]=temp;
number_c++;
}
assert(number_c==dimension);
line_c++;
}
//First two lines are the limits and we need at least one term in the polynomial
assert(line_c>2);
myfile.close();
}
else cout << "Unable to open file";
//n_terms: Number of terms in the polynomial (first two lines are the limits)
int n_terms=line_c-2;
if(n_terms>MAX_N_TERMS)
{
std::cerr<<"The Maximum Number of terms defined in the code is "<<MAX_N_TERMS<<std::endl;
return -1;
}
//----------------------------------
//Display the numbers in the file (same format as the file)
//----------------------------------
if(Validate==2)
{
std::cout<<"-----------------------------"<<std::endl;
std::cout<<"Upper Limit for dimensions = ";
for(int j=1;j<dimension;j++)
{
std::cout<<degrees[j]<<" ";
}
std::cout<<std::endl;
std::cout<<"Lower Limit for dimensions = ";
for(int j=1;j<dimension;j++)
{
std::cout<<degrees[dimension+j]<<" ";
}
std::cout<<std::endl;
for (int i=2;i<line_c;i++)
{
std::cout<<"Term "<<i-2<<" Coefficient = ";
for(int j=0;j<dimension;j++)
{
if(j==0)
{
std::cout<<degrees[i*dimension+j]<<", Powers = ";
}
else
{
std::cout<<degrees[i*dimension+j]<<" ";
}
}
std::cout<<std::endl;
}
std::cout<<"-----------------------------"<<std::endl;
}
//----------------------------------
//Calculate the Analytical solution
//----------------------------------
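	//Each term c*x1^p1*...*xD^pD integrates over the box [a_d,b_d] to
	//c * prod_d (b_d^(p_d+1) - a_d^(p_d+1)) / (p_d+1), where a_d comes from
	//line 0 of the input file and b_d from line 1; the loop below builds this
	//product per term and sums over terms.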
double Ianalytical=0;
for (int term_i=0;term_i<n_terms;term_i++)
{
double a,b,I;
//Initialize by the coefficient
I=degrees[(2+term_i)*dimension];
for (int d=1;d<dimension;d++)
{
a= degrees[0*dimension+d];
b= degrees[1*dimension+d];
b=pow(b,degrees[(2+term_i)*dimension+d]+1);
a=pow(a,degrees[(2+term_i)*dimension+d]+1);
I*=(b-a)/(double)(degrees[(2+term_i)*dimension+d]+1);
}
Ianalytical+=I;
}
std::cout<<"Analytical Solution = "<< Ianalytical <<std::endl;
std::cout<<"-----------------------------"<<std::endl;
//************************//
// PARALLEL RUN //
//***********************//
std::cout<<"Parallel Case using method "<<Method<<": "<<std::endl;
std::cout<< "Number of blocks = " << N_blocks <<std::endl;
std::cout <<"Number of threads per block = " << N_threads<<std::endl;
std::cout<< "Number of Iterations per thread = " << thread_max_iterations << std::endl;
std::cout<<"Total number of Evaluations = "<<max_evaluations<<std::endl;
std::cout<<"Dimension = "<<dimension-1<<std::endl;
std::cout<<"-----------------------------"<<std::endl;
//---------------------------------------
//Initial Setup (Check the Block and grid sizes)
//---------------------------------------
//Get Device properties
cudaDeviceProp device_properties;
cudaGetDeviceProperties (&device_properties, 0);
if (N_threads>device_properties.maxThreadsDim[0])
{
std::cerr << "Maximum threads for dimension 0 = " << device_properties.maxThreadsDim[0] << std::endl;
return -1;
}
if(N_blocks>device_properties.maxGridSize[0])
{
std::cerr << "Maximum grid dimension 0 = " << device_properties.maxGridSize[0] << std::endl;
return -1;
}
//---------------------------------------
// Setup Profiling
//---------------------------------------
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//I_val: Final Estimated Value of the Integral
float I_val=0;
//Pointers to data on the device
float *devDegrees;
float *dev_I_val;
curandState *devStates;
//seed the host random number generator to get a random seed for the Curand
srand(clock());
//Random seed to be used for Curand
long int seed = rand();
cudaError_t err;
//Allocate memory for A,B,C on device
	//Pass the address of a pointer; the malloc function will write the address of the allocated data into it.
	//It has to be cast to a void pointer.
if (Method!=2)
{err = cudaMalloc( (void **)&devStates, N_blocks*N_threads * sizeof(curandState) );assert(err == cudaSuccess);}
err = cudaMalloc((void**)&devDegrees,degrees.size()*sizeof(float));assert(err == cudaSuccess);
err = cudaMalloc((void**)&dev_I_val,sizeof(float));assert(err == cudaSuccess);
//Copy the data to the device
// CudaMemcpy(TO_ADDRESS, FROM_ADDRESS, NUMBER_OF_BYTES, DIRECTION)
//Where the direction is either cudaMemcpyHostToDevice or cudaMemcpyDeviceToHost
	err = cudaMemcpy( devDegrees,&degrees[0],degrees.size()*sizeof(float),cudaMemcpyHostToDevice);assert(err == cudaSuccess);
err = cudaMemcpy( dev_I_val,&I_val,sizeof(float),cudaMemcpyHostToDevice);assert(err == cudaSuccess);
//RUN THE KERNEL
if(Method==1)
{
MC_Integratev1<<<N_blocks,N_threads>>>(devDegrees,dimension,n_terms,dev_I_val,devStates,seed,thread_max_iterations);
}
else if (Method ==2)
{
MC_Integratev2<<<N_blocks,N_threads,(1+(2+n_terms)*dimension)*sizeof(float)>>>(devDegrees,dimension,n_terms,dev_I_val,seed,thread_max_iterations);
}
else
{
std::cerr<<"Please enter a valid method"<<std::endl;
cudaFree(devDegrees);
cudaFree(dev_I_val);
return -1;
}
//Copy the result to the Host
err =cudaMemcpy(&I_val,dev_I_val,sizeof(float),cudaMemcpyDeviceToHost);assert(err == cudaSuccess);
//FREE MEMORY ON DEVICE
cudaFree(devDegrees);
cudaFree(dev_I_val);
if (Method!=2)
{cudaFree(devStates);}
//Multiply by the Volume
float a,b;
for (int d=1;d<dimension;d++)
{
a= degrees[0*dimension+d];
b= degrees[1*dimension+d];
I_val*=(b-a);
}
//Divide by the total number of evaluations
I_val/=(float)N_blocks;
I_val/=(float)N_threads;
I_val/=(float)thread_max_iterations;
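	//I_val is now Volume * (sum of f over all samples) / N_total, i.e. the
	//standard Monte Carlo estimate V*<f> of the integral.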
//---------------------------------------
// Stop Profiling
//---------------------------------------
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float gpu_time;
cudaEventElapsedTime(&gpu_time, start, stop); //time in milliseconds
gpu_time /= 1000.0;
std::cout<<"GPU Results: "<<std::endl;
std::cout <<"I = " << I_val << ", GPU time = "<<gpu_time<<std::endl;
//******************//
// SERIAL RUN //
//*****************//
if (Validate==1||Validate==2)
{
std::cout<<"-----------------------------"<<std::endl;
std::cout<<"Host Results: "<<std::endl;
double t_start_cpu = (double)clock()/(double)CLOCKS_PER_SEC;
//Set f_0_s to hold the coefficients of the polynomial terms
std::vector<double> f_0_s (n_terms,0);
for (int term_i=0;term_i<n_terms;term_i++)
{
f_0_s[term_i]=degrees[(2+term_i)*dimension];
}
srand(clock()); //seed the random number generator
long int N = 0;
double x;
double I = 0.0;
double a,b;
std::vector<double> f (n_terms,0);
do
{
//Initialize f with the coefficients
f=f_0_s;
for (int d=1;d<dimension;d++)
{
//Generate a random number in the range of the limits of this dimension
x = (double)rand()/(double)RAND_MAX; //x between 0 and 1
//limits
a= degrees[0*dimension+d];
b= degrees[1*dimension+d];
			x = x*(b-a) + a; //x between a and b for this dimension
for (int term_i=0;term_i<n_terms;term_i++)
{
//2: first 2 lines are the limits
f[term_i]*=pow(x,degrees[(2+term_i)*dimension+d]);
}
}
for (int term_i=0;term_i<n_terms;term_i++)
{
I+=f[term_i];
}
N++;
}
while (N <= max_evaluations);
//Multiply by the Volume
for (int d=1;d<dimension;d++)
{
a= degrees[0*dimension+d];
b= degrees[1*dimension+d];
I*=(b-a);
}
I/=(double)N;
double t_stop_cpu = (double)clock()/(double)CLOCKS_PER_SEC;
double cpu_time=t_stop_cpu-t_start_cpu;
std::cout <<"I = " << I << ", Host time = "<<cpu_time<<std::endl;
std::cout<<"Speed up = "<<cpu_time/gpu_time<<std::endl;
}
}
|
362627d6e469d6f6720b4963c80e9ffde8f186cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
namespace at { namespace native {
void bitwise_not_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
void logical_not_kernel_cuda(TensorIterator& iter) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) -> bool {
return !a;
});
}
void neg_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "neg_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
}
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
}}
| 362627d6e469d6f6720b4963c80e9ffde8f186cd.cu | #include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
namespace at { namespace native {
void bitwise_not_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
void logical_not_kernel_cuda(TensorIterator& iter) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) -> bool {
return !a;
});
}
void neg_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "neg_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
}
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
}}
|
350abf2ed44485475fdf94da5516c4970f1565ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/Error.h"
#include "ATen/AccumulateType.h"
#include "ATen/hip/HIPTensorMethods.cuh"
#include "ATen/hip/HIPTypeConversion.cuh"
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
__device__ __forceinline__ bool warp_has_collision(int val) {
// Compare our value to the values stored in the next 16 lanes,
  // wrapping around at 32. If any pair of values is the same, then
// there is a collision in the warp.
bool dup = 0;
const int laneId = threadIdx.x % 32;
#pragma unroll
for (int i = 1; i <= 16; i++) {
dup |= (WARP_SHFL(val, (laneId + i) % 32) == val);
}
return __any(dup) != 0;
}
// parallelizes over features
template <typename scalar_t>
__global__ void embedding_backward_feature_kernel(
int64_t* indices, scalar_t* grad, scalar_t* grad_weight,
int64_t num_indices, int64_t stride, int padding_idx) {
const int feature_dim = blockIdx.x * 4 + threadIdx.x / 32;
if (feature_dim >= stride) {
return;
}
// The strategy here is that each warp handles a single feature
// dimension.
// Within that feature dimension, points in the [batch][element]
// dimension can overlap, and we need to determine if threads want
// to add to the gradient in a colliding manner.
// Typically one would use floating-point atomicAdd() to resolve
// these collisions, but that is non-deterministic if there are
// collisions. Non-determinism for this code is really bad,
// especially in RNNs, and is prone to snowballing error.
// In order to get a deterministic order of execution, we handle
// non-colliding updates separately from colliding ones. Colliding
// updates are serialized in their order of execution by using the
// warp-wide collision detector `warp_has_collision`.
const int laneId = threadIdx.x % 32;
for (int64_t i = laneId; i < num_indices; i += WARP_SIZE) {
const int weight_index = (int)indices[i];
if (weight_index == padding_idx) {
continue;
}
auto value = grad[i * stride + feature_dim];
// FIXME: should we accumulate as accreal?
// Check for collision
if (warp_has_collision(weight_index)) {
// Run all lanes sequentially; warp divergence
for (int i = 0; i < WARP_SIZE; ++i) {
if (laneId == i) {
grad_weight[weight_index * stride + feature_dim] += value;
}
}
} else {
// No collision; warp coherence
grad_weight[weight_index * stride + feature_dim] += value;
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Renormalize in place the rows of 'weights' selected by 'indices' whose p-norm (p = norm_type) exceeds max_norm */
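/* Each block handles one selected row: threads accumulate the norm
   contributions of a strided slice of the row, reduceBlock combines the
   partial sums into the p-norm, and rows whose norm exceeds max_norm are
   scaled by max_norm/(norm + 1e-7). */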
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int dim) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * dim;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += ::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = ::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkContiguous("embedding_backward", indices_arg);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = at::zeros(grad_.type(), {num_weights, grad_.size(-1)});
int64_t stride = grad_weight.stride(0);
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
if (num_indices <= 768 && !scale_grad_by_freq) {
dim3 grid(THCCeilDiv(stride, (int64_t) 4));
dim3 block(128);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
hipLaunchKernelGGL(( embedding_backward_feature_kernel), dim3(grid), dim3(block), 0, stream,
indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
num_indices,
stride,
padding_idx);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
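    // count[i] now holds the total multiplicity of sorted_indices[i]; the
    // backward kernel divides each gradient contribution by it (scale = 1/count).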
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
hipLaunchKernelGGL(( embedding_backward_kernel), dim3(grid), dim3(block), 0, stream,
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkContiguous("embedding_renorm_", self_arg);
checkContiguous("embedding_renorm", indices_arg);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_data = device_ptr(indices.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
auto unique_indices = indices.type().tensor(indices.numel());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
using accscalar_t = acc_type<cuda_scalar_t, true>;
hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream,
self.data<cuda_scalar_t>(),
unique_indices.data<int64_t>(),
scalar_cast<accscalar_t>(max_norm),
scalar_cast<accscalar_t>(norm_type),
dim);
});
THCudaCheck(hipGetLastError());
return self;
}
}} // namespace at::native
| 350abf2ed44485475fdf94da5516c4970f1565ec.cu | #include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/Error.h"
#include "ATen/AccumulateType.h"
#include "ATen/cuda/CUDATensorMethods.cuh"
#include "ATen/cuda/CUDATypeConversion.cuh"
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
__device__ __forceinline__ bool warp_has_collision(int val) {
// Compare our value to the values stored in the next 16 lanes,
  // wrapping around at 32. If any pair of values is the same, then
// there is a collision in the warp.
bool dup = 0;
const int laneId = threadIdx.x % 32;
#pragma unroll
for (int i = 1; i <= 16; i++) {
dup |= (WARP_SHFL(val, (laneId + i) % 32) == val);
}
return __any(dup) != 0;
}
// parallelizes over features
template <typename scalar_t>
__global__ void embedding_backward_feature_kernel(
int64_t* indices, scalar_t* grad, scalar_t* grad_weight,
int64_t num_indices, int64_t stride, int padding_idx) {
const int feature_dim = blockIdx.x * 4 + threadIdx.x / 32;
if (feature_dim >= stride) {
return;
}
// The strategy here is that each warp handles a single feature
// dimension.
// Within that feature dimension, points in the [batch][element]
// dimension can overlap, and we need to determine if threads want
// to add to the gradient in a colliding manner.
// Typically one would use floating-point atomicAdd() to resolve
// these collisions, but that is non-deterministic if there are
// collisions. Non-determinism for this code is really bad,
// especially in RNNs, and is prone to snowballing error.
// In order to get a deterministic order of execution, we handle
// non-colliding updates separately from colliding ones. Colliding
// updates are serialized in their order of execution by using the
// warp-wide collision detector `warp_has_collision`.
const int laneId = threadIdx.x % 32;
for (int64_t i = laneId; i < num_indices; i += WARP_SIZE) {
const int weight_index = (int)indices[i];
if (weight_index == padding_idx) {
continue;
}
auto value = grad[i * stride + feature_dim];
// FIXME: should we accumulate as accreal?
// Check for collision
if (warp_has_collision(weight_index)) {
// Run all lanes sequentially; warp divergence
for (int i = 0; i < WARP_SIZE; ++i) {
if (laneId == i) {
grad_weight[weight_index * stride + feature_dim] += value;
}
}
} else {
// No collision; warp coherence
grad_weight[weight_index * stride + feature_dim] += value;
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int dim) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * dim;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += std::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = std::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkContiguous("embedding_backward", indices_arg);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = at::zeros(grad_.type(), {num_weights, grad_.size(-1)});
int64_t stride = grad_weight.stride(0);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
if (num_indices <= 768 && !scale_grad_by_freq) {
dim3 grid(THCCeilDiv(stride, (int64_t) 4));
dim3 block(128);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
embedding_backward_feature_kernel<<<grid, block, 0, stream>>>(
indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
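    // count[i] now holds the total multiplicity of sorted_indices[i]; the
    // backward kernel divides each gradient contribution by it (scale = 1/count).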
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
embedding_backward_kernel<<<grid, block, 0, stream>>>(
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkContiguous("embedding_renorm_", self_arg);
checkContiguous("embedding_renorm", indices_arg);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_data = device_ptr(indices.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
auto unique_indices = indices.type().tensor(indices.numel());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
using accscalar_t = acc_type<cuda_scalar_t, true>;
renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
self.data<cuda_scalar_t>(),
unique_indices.data<int64_t>(),
scalar_cast<accscalar_t>(max_norm),
scalar_cast<accscalar_t>(norm_type),
dim);
});
THCudaCheck(cudaGetLastError());
return self;
}
}} // namespace at::native
|
ee6459e665af391f7c8ff1d06e22e6d06c58a442.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
__global__ void VecAdd(int n, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A + B
* where A is a (1 * n) vector
* where B is a (1 * n) vector
* where C is a (1 * n) vector
*
********************************************************************/
// INSERT KERNEL CODE HERE
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) C[i] = A[i] + B[i];
}
void basicVecAdd( float *A, float *B, float *C, int n)
{
// Initialize thread block and kernel grid dimensions ---------------------
const unsigned int BLOCK_SIZE = 256;
//INSERT CODE HERE
dim3 dim_grid, dim_block;
dim_block = BLOCK_SIZE;
dim_grid = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
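    // Ceiling division so every element is covered; the kernel's bounds check
    // drops the surplus threads in the last block.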
hipLaunchKernelGGL(( VecAdd), dim3(dim_grid), dim3(dim_block), 0, 0, n, A, B, C);
}
| ee6459e665af391f7c8ff1d06e22e6d06c58a442.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
__global__ void VecAdd(int n, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A + B
* where A is a (1 * n) vector
* where B is a (1 * n) vector
* where C is a (1 * n) vector
*
********************************************************************/
// INSERT KERNEL CODE HERE
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) C[i] = A[i] + B[i];
}
void basicVecAdd( float *A, float *B, float *C, int n)
{
// Initialize thread block and kernel grid dimensions ---------------------
const unsigned int BLOCK_SIZE = 256;
//INSERT CODE HERE
dim3 dim_grid, dim_block;
dim_block = BLOCK_SIZE;
dim_grid = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
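    // Ceiling division so every element is covered; the kernel's bounds check
    // drops the surplus threads in the last block.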
VecAdd<<<dim_grid, dim_block>>>(n, A, B, C);
}
|
6b3487146d09cb3ee606c889317bb2ef987e86ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//2019 Eric Johnson
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "helper_cuda.h"
#include "cuda_computation_common.h"
////////////////////////////////////////////////////////////////////////////////
// GPU-specific defines
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
inline int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
__global__ void GPU_DLL_DEMO_kernelCompute(
int* d_input,
int* d_output,
const int adder,
const int length
)
{
const int n = (threadIdx.x + blockIdx.x * blockDim.x);
	if (n >= length){//handle out of range
return;
}
d_output[n] = d_input[n] + adder;
}
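//Host-side launcher. d_input and d_output are expected to be device
//allocations holding at least 'length' ints; 'blocksize' is not defined in
//this file and is assumed to come from one of the included headers.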
extern "C" void GPU_DLL_DEMO_GPU(
int* d_input,
int* d_output,
const int adder,
const int length
)
{
hipFuncSetCacheConfig(GPU_DLL_DEMO_kernelCompute, hipFuncCachePreferL1);
dim3 threads(blocksize);
dim3 blocks(iDivUp(length, threads.x));
GPU_DLL_DEMO_kernelCompute << <blocks, threads>> >(
d_input,
d_output,
adder,
length
);
getLastCudaError("Kernel() execution failed\n");
}
| 6b3487146d09cb3ee606c889317bb2ef987e86ff.cu | //2019 Eric Johnson
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "helper_cuda.h"
#include "cuda_computation_common.h"
////////////////////////////////////////////////////////////////////////////////
// GPU-specific defines
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
inline int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
__global__ void GPU_DLL_DEMO_kernelCompute(
int* d_input,
int* d_output,
const int adder,
const int length
)
{
const int n = (threadIdx.x + blockIdx.x * blockDim.x);
	if (n >= length){//handle out of range
return;
}
d_output[n] = d_input[n] + adder;
}
extern "C" void GPU_DLL_DEMO_GPU(
int* d_input,
int* d_output,
const int adder,
const int length
)
{
cudaFuncSetCacheConfig(GPU_DLL_DEMO_kernelCompute, cudaFuncCachePreferL1);
dim3 threads(blocksize);
dim3 blocks(iDivUp(length, threads.x));
GPU_DLL_DEMO_kernelCompute << <blocks, threads>> >(
d_input,
d_output,
adder,
length
);
getLastCudaError("Kernel() execution failed\n");
}
|
be29a90c2a878fea619590f547958fb1fc4d776e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "book.h"
#include "cuda_bridge.h"
#define BLOCK_SIZE 16
__global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n)
{
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
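    // Classic tiled multiply: each block computes a BLOCK_SIZE x BLOCK_SIZE tile
    // of the result, staging tiles of A and B through shared memory and
    // accumulating partial dot products across the gridDim.x tile steps.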
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
#pragma unroll
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n)
{
// n may not divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if(row < n && col < n)
{
d_result[row * n + col] = tmp % 512;
}
}
int *Md = NULL, *Nd = NULL, *Pd = NULL;
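/* Usage sketch (hypothetical caller, not part of this file):
     int W = 64;
     int *M = new int[W*W], *N = new int[W*W], *P = new int[W*W];
     // ... fill M and N ...
     MatrixMultiplication(M, N, P, W); // P receives M*N with each entry reduced mod 512
     delete[] M; delete[] N; delete[] P;
   The parameters are references to pointers, so the arguments must be lvalue pointers. */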
void MatrixMultiplication(int *&M, int *&N, int *&P, int Width) {
int size = Width * Width * sizeof(int);
// allocate memory on the GPU
//if(Md == NULL) {
//std::cout << "hipMalloc" << std::endl;
HANDLE_ERROR( hipMalloc((void**)&Md, size) );
//} else {
//std::cout << "ALLOCATED" << std::endl;
//}
//if(Nd == NULL)
HANDLE_ERROR( hipMalloc((void**)&Nd, size) );
//if(Pd == NULL)
HANDLE_ERROR( hipMalloc((void**)&Pd, size) );
// transfer M and N to device memory
HANDLE_ERROR( hipMemcpy(Md, M, size, hipMemcpyHostToDevice) );
HANDLE_ERROR( hipMemcpy(Nd, N, size, hipMemcpyHostToDevice) );
unsigned int grid_rows = (Width + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (Width + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//std::cout << "Width: " << Width << std::endl;
//std::cout << "Width/32: " << Width/32 << std::endl;
//Kernel<<<dimGrid, dimBlock>>>( Md, Nd, Pd, Width);
hipLaunchKernelGGL(( gpu_square_matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, Width);
//std::cout << "Width/32: " << Width/32 << std::endl;
// transfer P from device
//std::cout << "P: " << P[0] << std::endl;
HANDLE_ERROR( hipMemcpy(P,Pd,size,hipMemcpyDeviceToHost) );
hipDeviceSynchronize();
HANDLE_ERROR( hipFree(Md) );
HANDLE_ERROR( hipFree(Nd) );
HANDLE_ERROR( hipFree(Pd) );
}
| be29a90c2a878fea619590f547958fb1fc4d776e.cu | #include <iostream>
#include <cuda_runtime.h>
#include "book.h"
#include "cuda_bridge.h"
#define BLOCK_SIZE 16
__global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n)
{
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
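    // Classic tiled multiply: each block computes a BLOCK_SIZE x BLOCK_SIZE tile
    // of the result, staging tiles of A and B through shared memory and
    // accumulating partial dot products across the gridDim.x tile steps.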
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
#pragma unroll
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n)
{
// n may not divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if(row < n && col < n)
{
d_result[row * n + col] = tmp % 512;
}
}
int *Md = NULL, *Nd = NULL, *Pd = NULL;
void MatrixMultiplication(int *&M, int *&N, int *&P, int Width) {
int size = Width * Width * sizeof(int);
// allocate memory on the GPU
//if(Md == NULL) {
//std::cout << "cudaMalloc" << std::endl;
HANDLE_ERROR( cudaMalloc((void**)&Md, size) );
//} else {
//std::cout << "ALLOCATED" << std::endl;
//}
//if(Nd == NULL)
HANDLE_ERROR( cudaMalloc((void**)&Nd, size) );
//if(Pd == NULL)
HANDLE_ERROR( cudaMalloc((void**)&Pd, size) );
// transfer M and N to device memory
HANDLE_ERROR( cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice) );
unsigned int grid_rows = (Width + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (Width + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//std::cout << "Width: " << Width << std::endl;
//std::cout << "Width/32: " << Width/32 << std::endl;
//Kernel<<<dimGrid, dimBlock>>>( Md, Nd, Pd, Width);
gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);
//std::cout << "Width/32: " << Width/32 << std::endl;
// transfer P from device
//std::cout << "P: " << P[0] << std::endl;
HANDLE_ERROR( cudaMemcpy(P,Pd,size,cudaMemcpyDeviceToHost) );
cudaThreadSynchronize();
HANDLE_ERROR( cudaFree(Md) );
HANDLE_ERROR( cudaFree(Nd) );
HANDLE_ERROR( cudaFree(Pd) );
}
|
f0551dc6330c429c7046ff50dc004ce63892d321.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <bits/stdc++.h>
#include <cassert>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define gpu_copy_to(x, y, pos) thrust::copy((x).begin(), (x).end(), (y).begin() + (pos))
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
const int ARRAY_SIZE = 1E9;
__global__ void initKernel(){
return;
}
__global__ void naiveKernel(int N, float *input, float *output){
float res = 0.;
for(int i=0;i<N;++i) res += input[i];
*output = res/N;
}
__global__ void thrustKernel(int N, float *input, float *output){
float res = thrust::reduce(thrust::device, input, input + N);
*output = res/N;
}
int main(){
hipEvent_t start, stop;
float cpu_time, gpu_time1, gpu_time2;
hipEventCreate(&start); // creating the event 1
hipEventCreate(&stop); // creating the event 2
hipLaunchKernelGGL(( initKernel), dim3(1),dim3(1), 0, 0, );
hipLaunchKernelGGL(( initKernel), dim3(1),dim3(1), 0, 0, );
hipLaunchKernelGGL(( initKernel), dim3(1),dim3(1), 0, 0, );
for(int N = 2; N<=ARRAY_SIZE ; N*=2){
float ans = 0.;
vector<float> input(N);
def_dvec(float) dev_in(N), dev_ans1(1, 0.), dev_ans2(1,0.);
generate(input.begin(), input.end(), [](){return float(rand())/RAND_MAX;});
gpu_copy(input, dev_in);
// Using CPU to compute the average
clock_t t_start = clock();
ans = accumulate(input.begin(), input.end(), 0.)/N;
cpu_time = float(clock() - t_start)/CLOCKS_PER_SEC;
// Using the naive kernel
hipEventRecord(start, 0);
hipLaunchKernelGGL(( naiveKernel), dim3(1), dim3(1), 0, 0, N, to_ptr(dev_in), to_ptr(dev_ans1));
hipEventRecord(stop, 0); // Stop time measuring
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_time1, start, stop);
gpu_time1/=1000.;
// Using the thrust kernel
hipEventRecord(start, 0);
hipLaunchKernelGGL(( thrustKernel), dim3(1), dim3(1), 0, 0, N, to_ptr(dev_in), to_ptr(dev_ans2));
hipEventRecord(stop, 0); // Stop time measuring
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_time2, start, stop);
gpu_time2 /= 1000.;
// output results
cout<< '[' << N<<','<<cpu_time<<','<<gpu_time1<<','<<gpu_time2<<',';
cout<< ans <<','<<dev_ans1[0]<<','<<dev_ans2[0]<<"],"<<endl;
}
return 0;
}
| f0551dc6330c429c7046ff50dc004ce63892d321.cu | #include <bits/stdc++.h>
#include <cassert>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define gpu_copy_to(x, y, pos) thrust::copy((x).begin(), (x).end(), (y).begin() + (pos))
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
const int ARRAY_SIZE = 1E9;
__global__ void initKernel(){
return;
}
__global__ void naiveKernel(int N, float *input, float *output){
float res = 0.;
for(int i=0;i<N;++i) res += input[i];
*output = res/N;
}
__global__ void thrustKernel(int N, float *input, float *output){
float res = thrust::reduce(thrust::device, input, input + N);
*output = res/N;
}
int main(){
cudaEvent_t start, stop;
float cpu_time, gpu_time1, gpu_time2;
cudaEventCreate(&start); // creating the event 1
cudaEventCreate(&stop); // creating the event 2
initKernel<<<1,1>>>();
initKernel<<<1,1>>>();
initKernel<<<1,1>>>();
for(int N = 2; N<=ARRAY_SIZE ; N*=2){
float ans = 0.;
vector<float> input(N);
def_dvec(float) dev_in(N), dev_ans1(1, 0.), dev_ans2(1,0.);
generate(input.begin(), input.end(), [](){return float(rand())/RAND_MAX;});
gpu_copy(input, dev_in);
// Using CPU to compute the average
clock_t t_start = clock();
ans = accumulate(input.begin(), input.end(), 0.)/N;
cpu_time = float(clock() - t_start)/CLOCKS_PER_SEC;
// Using the naive kernel
cudaEventRecord(start, 0);
naiveKernel<<<1, 1>>>(N, to_ptr(dev_in), to_ptr(dev_ans1));
cudaEventRecord(stop, 0); // Stop time measuring
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_time1, start, stop);
gpu_time1/=1000.;
// Using the thrust kernel
cudaEventRecord(start, 0);
thrustKernel<<<1, 1>>>(N, to_ptr(dev_in), to_ptr(dev_ans2));
cudaEventRecord(stop, 0); // Stop time measuring
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_time2, start, stop);
gpu_time2 /= 1000.;
// output results
cout<< '[' << N<<','<<cpu_time<<','<<gpu_time1<<','<<gpu_time2<<',';
cout<< ans <<','<<dev_ans1[0]<<','<<dev_ans2[0]<<"],"<<endl;
}
return 0;
}
|
06a890ac5620623a1133185ea092f49be4eae9e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#define NUM_ELEM 8
#define NUM_THREADS 10
using namespace std;
__global__ void concurrentRW(int *data) {
// NUM_THREADS try to read and write at same location
// data[blockIdx.x] = data[blockIdx.x] + threadIdx.x;
atomicAdd(&data[blockIdx.x], threadIdx.x);
}
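// With atomicAdd each element should end up as the sum of all threadIdx.x values
// in its block, i.e. NUM_THREADS*(NUM_THREADS-1)/2 = 45, which main() verifies.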
int main(int argc, char *argv[]) {
int* data = NULL;
bool errorsDetected = false;
hipMallocManaged(&data, NUM_ELEM * sizeof(unsigned long long int));
if (data == 0) {
cout << "[HOST] Couldn't allocate memory\n";
return 1;
}
// init all elements to 0
    hipMemset(data, 0, NUM_ELEM * sizeof(int));
// launch kernel writes
hipLaunchKernelGGL(( concurrentRW), dim3(NUM_ELEM), dim3(NUM_THREADS), 0, 0, data);
hipDeviceSynchronize();
if (hipSuccess != hipGetLastError()) {
return 1;
}
for(int i = 0; i < NUM_ELEM; i++) {
cout << i << ". " << data[i] << endl;
if(data[i] != (NUM_THREADS * (NUM_THREADS - 1) / 2)) {
errorsDetected = true;
}
}
if(errorsDetected) {
cout << "Errors detected" << endl;
} else {
cout << "OK" << endl;
}
return 0;
} | 06a890ac5620623a1133185ea092f49be4eae9e6.cu | #include <iostream>
#define NUM_ELEM 8
#define NUM_THREADS 10
using namespace std;
__global__ void concurrentRW(int *data) {
// NUM_THREADS try to read and write at same location
// data[blockIdx.x] = data[blockIdx.x] + threadIdx.x;
atomicAdd(&data[blockIdx.x], threadIdx.x);
}
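// With atomicAdd each element should end up as the sum of all threadIdx.x values
// in its block, i.e. NUM_THREADS*(NUM_THREADS-1)/2 = 45, which main() verifies.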
int main(int argc, char *argv[]) {
int* data = NULL;
bool errorsDetected = false;
cudaMallocManaged(&data, NUM_ELEM * sizeof(unsigned long long int));
if (data == 0) {
cout << "[HOST] Couldn't allocate memory\n";
return 1;
}
// init all elements to 0
    cudaMemset(data, 0, NUM_ELEM * sizeof(int));
// launch kernel writes
concurrentRW<<<NUM_ELEM, NUM_THREADS>>>(data);
cudaDeviceSynchronize();
if (cudaSuccess != cudaGetLastError()) {
return 1;
}
for(int i = 0; i < NUM_ELEM; i++) {
cout << i << ". " << data[i] << endl;
if(data[i] != (NUM_THREADS * (NUM_THREADS - 1) / 2)) {
errorsDetected = true;
}
}
if(errorsDetected) {
cout << "Errors detected" << endl;
} else {
cout << "OK" << endl;
}
return 0;
} |
b6969592e28c5fab49cfff776d7ddc481cbff897.hip | // !!! This is a file automatically generated by hipify!!!
#include "defines.h"
#include "definesCuda.cuh"
#include <string.h>
#include "lib.cuh"
#include <hip/hip_runtime.h>
#include <rocblas.h>
/*
Calculate the covariance matrix using double precision
*/
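/* Interpretation (inferred from the variable names, not stated in this file):
   this appears to build the Levenberg-Marquardt normal equations, with 'alpha'
   accumulating the weighted J^T*J over the four Stokes profiles and 'beta' the
   weighted projection of the residual (spectra - spectro) onto the Jacobian. */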
__device__ void covarm(const REAL * __restrict__ w,const REAL * __restrict__ w_d,const REAL sig,const float * __restrict__ spectro,int nspectro,const REAL * __restrict__ spectra,const REAL * __restrict__ d_spectra,PRECISION *beta,REAL *alpha,ProfilesMemory * pM, const int nterms){
int j,i,k;
int h;
REAL sum,sum2;
REAL *BTaux,*APaux;
for(j=0;j<NPARMS;j++){
REAL w_aux = w[j];
REAL w_d_aux = w_d[j];
BTaux=pM->BT+(j*nterms);
APaux=pM->AP+(j*nterms*nterms);
for ( i = 0; i < nterms; i++){
//#pragma unroll
for ( h = 0; h < nterms; h++){
sum=0;
if(i==0)
sum2=0;
for ( k = 0; k < nspectro; k++){
REAL dAux = __ldg((d_spectra+(j*nspectro*nterms)+(h*nspectro)+k));
sum += __ldg(d_spectra+(j*nspectro*nterms)+(i*nspectro)+k) * dAux;
if(i==0){
sum2 += (w_aux*( __ldg(spectra+k+nspectro*j)-__ldg(spectro+k+nspectro*j) )) * dAux;
}
}
APaux[(nterms)*i+h] = (sum)*w_d_aux;
if(i==0){
BTaux[h] = __fdividef(sum2,sig);
}
}
}
}
REAL sum3,sum4;
#pragma unroll
for(i=0;i<nterms;i++){
sum=pM->BT[i];
sum2=pM->BT[nterms+i];
sum3=pM->BT[2*nterms+i];
sum4=pM->BT[3*nterms+i];
beta[i] = sum + sum2 + sum3 + sum4;
}
totalParcialMatrixf(pM->AP,nterms,nterms,NPARMS,alpha); //alpha de tam nterms x nterms
}
/*
Calculate covariance matrix using float type
*/
__device__ void covarmf(const REAL * __restrict__ w,const REAL * __restrict__ w_d,const REAL sig,const float * __restrict__ spectro,int nspectro,const REAL * __restrict__ spectra,const REAL * __restrict__ d_spectra,REAL *beta,REAL *alpha,ProfilesMemory * pM,const int nterms){
int j,i,k;
int h;
REAL sum;
REAL sum2;
REAL *APaux;
REAL *BTaux;
/*for(i=0;i<nterms;i++)
beta[i]=0;*/
for(j=0;j<NPARMS;j++){
REAL w_aux = w[j];
REAL w_d_aux = w_d[j];
BTaux=pM->BT+(j*nterms);
APaux=pM->AP+(j*nterms*nterms);
for ( i = 0; i < nterms; i++){
for ( h = 0; h < nterms; h++){
sum=0;
if(i==0)
sum2=0;
for ( k = 0; k < nspectro; k++){
REAL dAux = __ldg((d_spectra+(j*nspectro*nterms)+(h*nspectro)+k));
sum += __ldg(d_spectra+(j*nspectro*nterms)+(i*nspectro)+k) * dAux;
if(i==0){
sum2 += (w_aux*( __ldg(spectra+k+nspectro*j)-__ldg(spectro+k+nspectro*j) )) * dAux;
}
}
APaux[(nterms)*i+h] = (sum)*w_d_aux;
if(i==0){
BTaux[h] = __fdividef(sum2,sig);
}
}
}
}
REAL sum3,sum4;
#pragma unroll
for(i=0;i<nterms;i++){
sum=pM->BT[i];
sum2=pM->BT[nterms+i];
sum3=pM->BT[2*nterms+i];
sum4=pM->BT[3*nterms+i];
beta[i] = sum + sum2 + sum3 + sum4;
}
totalParcialMatrixf(pM->AP,nterms,nterms,NPARMS,alpha); //alpha de tam nterms x nterms
}
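/*
	Weighted chi-squared-style figure of merit between the two profile sets
	(spectra vs. spectro): per-component squared differences are weighted by w[j],
	divided by sig, summed over the NPARMS=4 components, and normalised by nfree.
*/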
__device__ REAL fchisqr(const REAL * __restrict__ spectra,const int nspectro,const float * __restrict__ spectro, const REAL * w, const REAL sig, const REAL nfree){
REAL TOT,dif1,dif2,dif3,dif4;
REAL opa1,opa2,opa3,opa4;
int i;
TOT=0;
opa1=0;
opa2=0;
opa3=0;
opa4=0;
REAL w_0 = w[0];
REAL w_1 = w[1];
REAL w_2 = w[2];
REAL w_3 = w[3];
for(i=0;i<nspectro;i++){
dif1=spectra[i]-spectro[i];
dif2=spectra[i+nspectro]-spectro[i+nspectro];
dif3=spectra[i+nspectro*2]-spectro[i+nspectro*2];
dif4=spectra[i+nspectro*3]-spectro[i+nspectro*3];
opa1+= __fdividef(((dif1*dif1)*w_0),(sig));
opa2+= __fdividef(((dif2*dif2)*w_1),(sig));
opa3+= __fdividef(((dif3*dif3)*w_2),(sig));
opa4+= __fdividef(((dif4*dif4)*w_3),(sig));
}
TOT= opa1+opa2+opa3+opa4;
return TOT/nfree;
}
/*
Multiplies matrix a (size naf,nac)
by matrix b (size nbf,nbc)
IDL-style, i.e. rows of a times columns of b;
the result is stored in resultOut (size fil,col).
The output size (fil,col) corresponds to (nbf,nac).
The number of columns of b, nbc, must equal the number of rows of a, naf.
*/
__device__ void multmatrixIDLValue(const REAL *a,int naf,int nac,const REAL *b,int nbf,int nbc,REAL *result,REAL value,const int nterms){
int i,k;
REAL sum;
for ( i = 0; i < nterms; i++){
sum=0;
for ( k = 0; k < naf; k++){
//printf("i: %d,j:%d,k=%d .. a[%d][%d]:%f .. b[%d][%d]:%f\n",i,j,k,k,j,a[k*nac+j],i,k,b[i*nbc+k]);
sum += a[k] * b[i*nbc+k];
}
result[i] = sum/value;
}
}
/**
*
* @param A --> Matrix of size fxc
* @param f --> num of Rows of A --> We assume that f will be always 4.
* @param c --> num of Cols of A
* @param result --> Array of size c
*
 * Computes the column-wise sum of A and stores each column's sum in the array result.
* */
__device__ void totalParcialf(const REAL * __restrict__ A, int f,int c,PRECISION * result){
int i;
REAL sum,sum2,sum3,sum4;
#pragma unroll
for(i=0;i<c;i++){
sum=A[i];
sum2=A[c+i];
sum3=A[2*c+i];
sum4=A[3*c+i];
result[i] = sum + sum2 + sum3 + sum4;
}
}
/**
 * @param A --> 3D matrix of size f x c x p
* @param f --> num of rows of A and result
* @param c --> num of cols of A and result
* @param p --> num of depth of A
* @param result --> Matrix to store the result. Size fXc
*
 * Sums A along the depth axis for each element (x,y) of A and stores that sum in result(x,y).
* */
__device__ void totalParcialMatrixf(const REAL * __restrict__ A, int f,int c,int p,REAL * result){
int i,j;
REAL sum,sum2,sum3,sum4;
#pragma unroll
for(i=0;i<f;i++)
#pragma unroll
for(j=0;j<c;j++){
sum = A[i*c+j];
sum2 = A[i*c+j+f*c];
sum3 = A[i*c+j+f*c*2];
sum4 = A[i*c+j+f*c*3];
result[i*c+j] = sum + sum2 + sum3 + sum4;
}
// return result;
}
/*
Multiplies matrix a (size naf,nac)
by matrix b (size nbf,nbc)
as an algebraic matrix product, i.e. columns of a times rows of b;
the result is stored in resultOut (size fil,col).
The output size (fil,col) corresponds to (nbf,nac).
The number of columns of a, nac, must equal the number of rows of b, nbf.
*/
__device__ int multmatrix(PRECISION *a,int naf,int nac, PRECISION *b,int nbf,int nbc,PRECISION *result){
int i,j,k;
PRECISION sum;
for ( i = 0; i < naf; i++)
for ( j = 0; j < nbc; j++){
sum=0;
#pragma unroll
for ( k = 0; k < nbf; k++){
// printf("i: %d,j:%d,k=%d .. a[%d][%d] .. b[%d][%d]\n",i,j,k,i,k,k,j);
sum += a[i*nac+k] * b[k*nbc+j];
}
// printf("Sum\n");
result[(nbc)*i+j] = sum;
}
return 1;
}
__device__ int multmatrix_transpose(const REAL *a,int naf,int nac,const REAL *b,int nbf,int nbc,REAL *result,int *fil,int *col,REAL value){
int i,j,k;
REAL sum;
if(nac==nbc){
(*fil)=naf;
(*col)=nbf;
for ( i = 0; i < naf; i++){
for ( j = 0; j < nbf; j++){
sum=0;
for ( k = 0; k < nbc; k++){
sum += a[i*nac+k] * b[j*nbc+k];
}
result[(*col)*i+j] = (sum)*value;
}
}
return 1;
}else{
printf("\n \n Error in multmatrix_transpose: nac and nbc do not match!!!! ..\n\n");
}
return 0;
}
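/*
	Kernel variant of the transpose product: matrix a is staged in dynamic shared
	memory and result[i][j] = (sum_k a[i*nac+k] * a[j*nbc+k]) * value is computed.
	Note that only the shared copy of a is read, so the kernel effectively assumes
	b aliases a (i.e. it computes a * a^T scaled by value).
*/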
__global__ void d_multmatrix_transpose(const REAL * __restrict__ a,int naf,int nac,const REAL * __restrict__ b,int nbf,int nbc,REAL * __restrict__ result,const REAL value){
int i = blockIdx.x * blockDim.x + threadIdx.x; // row
int j = blockIdx.x * blockDim.x + threadIdx.y; // col
int k;
REAL sum=0;
extern __shared__ REAL d_a[];
//#pragma unroll
//for ( k = 0; k < nbc; k++){
//#pragma unroll
for ( k = j; k < nbc; k=k+(nbc/nbf)){
d_a[i*nbc+k] = a[i*nbc+k];
}
__syncthreads();
#pragma unroll
for ( k = 0; k < nbc; k++){
sum += (d_a[i*nac+k] * d_a[j*nbc+k]);
}
result[(nbf)*i+j] = (sum)*value;
}
| b6969592e28c5fab49cfff776d7ddc481cbff897.cu | #include "defines.h"
#include "definesCuda.cuh"
#include <string.h>
#include "lib.cuh"
#include <cuda_runtime.h>
#include <cublas_v2.h>
/*
Calculate covariance matrix using double type
*/
__device__ void covarm(const REAL * __restrict__ w,const REAL * __restrict__ w_d,const REAL sig,const float * __restrict__ spectro,int nspectro,const REAL * __restrict__ spectra,const REAL * __restrict__ d_spectra,PRECISION *beta,REAL *alpha,ProfilesMemory * pM, const int nterms){
int j,i,k;
int h;
REAL sum,sum2;
REAL *BTaux,*APaux;
for(j=0;j<NPARMS;j++){
REAL w_aux = w[j];
REAL w_d_aux = w_d[j];
BTaux=pM->BT+(j*nterms);
APaux=pM->AP+(j*nterms*nterms);
for ( i = 0; i < nterms; i++){
//#pragma unroll
for ( h = 0; h < nterms; h++){
sum=0;
if(i==0)
sum2=0;
for ( k = 0; k < nspectro; k++){
REAL dAux = __ldg((d_spectra+(j*nspectro*nterms)+(h*nspectro)+k));
sum += __ldg(d_spectra+(j*nspectro*nterms)+(i*nspectro)+k) * dAux;
if(i==0){
sum2 += (w_aux*( __ldg(spectra+k+nspectro*j)-__ldg(spectro+k+nspectro*j) )) * dAux;
}
}
APaux[(nterms)*i+h] = (sum)*w_d_aux;
if(i==0){
BTaux[h] = __fdividef(sum2,sig);
}
}
}
}
REAL sum3,sum4;
#pragma unroll
for(i=0;i<nterms;i++){
sum=pM->BT[i];
sum2=pM->BT[nterms+i];
sum3=pM->BT[2*nterms+i];
sum4=pM->BT[3*nterms+i];
beta[i] = sum + sum2 + sum3 + sum4;
}
totalParcialMatrixf(pM->AP,nterms,nterms,NPARMS,alpha); //alpha de tam nterms x nterms
}
/*
Calculate covariance matrix using float type
*/
__device__ void covarmf(const REAL * __restrict__ w,const REAL * __restrict__ w_d,const REAL sig,const float * __restrict__ spectro,int nspectro,const REAL * __restrict__ spectra,const REAL * __restrict__ d_spectra,REAL *beta,REAL *alpha,ProfilesMemory * pM,const int nterms){
int j,i,k;
int h;
REAL sum;
REAL sum2;
REAL *APaux;
REAL *BTaux;
/*for(i=0;i<nterms;i++)
beta[i]=0;*/
for(j=0;j<NPARMS;j++){
REAL w_aux = w[j];
REAL w_d_aux = w_d[j];
BTaux=pM->BT+(j*nterms);
APaux=pM->AP+(j*nterms*nterms);
for ( i = 0; i < nterms; i++){
for ( h = 0; h < nterms; h++){
sum=0;
if(i==0)
sum2=0;
for ( k = 0; k < nspectro; k++){
REAL dAux = __ldg((d_spectra+(j*nspectro*nterms)+(h*nspectro)+k));
sum += __ldg(d_spectra+(j*nspectro*nterms)+(i*nspectro)+k) * dAux;
if(i==0){
sum2 += (w_aux*( __ldg(spectra+k+nspectro*j)-__ldg(spectro+k+nspectro*j) )) * dAux;
}
}
APaux[(nterms)*i+h] = (sum)*w_d_aux;
if(i==0){
BTaux[h] = __fdividef(sum2,sig);
}
}
}
}
REAL sum3,sum4;
#pragma unroll
for(i=0;i<nterms;i++){
sum=pM->BT[i];
sum2=pM->BT[nterms+i];
sum3=pM->BT[2*nterms+i];
sum4=pM->BT[3*nterms+i];
beta[i] = sum + sum2 + sum3 + sum4;
}
totalParcialMatrixf(pM->AP,nterms,nterms,NPARMS,alpha); //alpha de tam nterms x nterms
}
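/*
	Weighted chi-squared-style figure of merit between the two profile sets
	(spectra vs. spectro): per-component squared differences are weighted by w[j],
	divided by sig, summed over the NPARMS=4 components, and normalised by nfree.
*/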
__device__ REAL fchisqr(const REAL * __restrict__ spectra,const int nspectro,const float * __restrict__ spectro, const REAL * w, const REAL sig, const REAL nfree){
REAL TOT,dif1,dif2,dif3,dif4;
REAL opa1,opa2,opa3,opa4;
int i;
TOT=0;
opa1=0;
opa2=0;
opa3=0;
opa4=0;
REAL w_0 = w[0];
REAL w_1 = w[1];
REAL w_2 = w[2];
REAL w_3 = w[3];
for(i=0;i<nspectro;i++){
dif1=spectra[i]-spectro[i];
dif2=spectra[i+nspectro]-spectro[i+nspectro];
dif3=spectra[i+nspectro*2]-spectro[i+nspectro*2];
dif4=spectra[i+nspectro*3]-spectro[i+nspectro*3];
opa1+= __fdividef(((dif1*dif1)*w_0),(sig));
opa2+= __fdividef(((dif2*dif2)*w_1),(sig));
opa3+= __fdividef(((dif3*dif3)*w_2),(sig));
opa4+= __fdividef(((dif4*dif4)*w_3),(sig));
}
TOT= opa1+opa2+opa3+opa4;
return TOT/nfree;
}
/*
Multiplies matrix a (size naf,nac)
by matrix b (size nbf,nbc)
IDL-style, i.e. rows of a times columns of b;
the result is stored in resultOut (size fil,col).
The output size (fil,col) corresponds to (nbf,nac).
The number of columns of b, nbc, must equal the number of rows of a, naf.
*/
__device__ void multmatrixIDLValue(const REAL *a,int naf,int nac,const REAL *b,int nbf,int nbc,REAL *result,REAL value,const int nterms){
int i,k;
REAL sum;
for ( i = 0; i < nterms; i++){
sum=0;
for ( k = 0; k < naf; k++){
//printf("i: %d,j:%d,k=%d .. a[%d][%d]:%f .. b[%d][%d]:%f\n",i,j,k,k,j,a[k*nac+j],i,k,b[i*nbc+k]);
sum += a[k] * b[i*nbc+k];
}
result[i] = sum/value;
}
}
/**
*
* @param A --> Matrix of size fxc
* @param f --> num of Rows of A --> We assume that f will be always 4.
* @param c --> num of Cols of A
* @param result --> Array of size c
*
 * Computes the column-wise sum of A and stores each column's sum in the array result.
* */
__device__ void totalParcialf(const REAL * __restrict__ A, int f,int c,PRECISION * result){
int i;
REAL sum,sum2,sum3,sum4;
#pragma unroll
for(i=0;i<c;i++){
sum=A[i];
sum2=A[c+i];
sum3=A[2*c+i];
sum4=A[3*c+i];
result[i] = sum + sum2 + sum3 + sum4;
}
}
/**
 * @param A --> 3D matrix of size f x c x p
* @param f --> num of rows of A and result
* @param c --> num of cols of A and result
* @param p --> num of depth of A
* @param result --> Matrix to store the result. Size fXc
*
 * Sums A along the depth axis for each element (x,y) of A and stores that sum in result(x,y).
* */
__device__ void totalParcialMatrixf(const REAL * __restrict__ A, int f,int c,int p,REAL * result){
int i,j;
REAL sum,sum2,sum3,sum4;
#pragma unroll
for(i=0;i<f;i++)
#pragma unroll
for(j=0;j<c;j++){
sum = A[i*c+j];
sum2 = A[i*c+j+f*c];
sum3 = A[i*c+j+f*c*2];
sum4 = A[i*c+j+f*c*3];
result[i*c+j] = sum + sum2 + sum3 + sum4;
}
// return result;
}
/*
Multiplies matrix a (size naf,nac)
by matrix b (size nbf,nbc)
as an algebraic matrix product, i.e. columns of a times rows of b;
the result is stored in resultOut (size fil,col).
The output size (fil,col) corresponds to (nbf,nac).
The number of columns of a, nac, must equal the number of rows of b, nbf.
*/
__device__ int multmatrix(PRECISION *a,int naf,int nac, PRECISION *b,int nbf,int nbc,PRECISION *result){
int i,j,k;
PRECISION sum;
for ( i = 0; i < naf; i++)
for ( j = 0; j < nbc; j++){
sum=0;
#pragma unroll
for ( k = 0; k < nbf; k++){
// printf("i: %d,j:%d,k=%d .. a[%d][%d] .. b[%d][%d]\n",i,j,k,i,k,k,j);
sum += a[i*nac+k] * b[k*nbc+j];
}
// printf("Sum\n");
result[(nbc)*i+j] = sum;
}
return 1;
}
__device__ int multmatrix_transpose(const REAL *a,int naf,int nac,const REAL *b,int nbf,int nbc,REAL *result,int *fil,int *col,REAL value){
int i,j,k;
REAL sum;
if(nac==nbc){
(*fil)=naf;
(*col)=nbf;
for ( i = 0; i < naf; i++){
for ( j = 0; j < nbf; j++){
sum=0;
for ( k = 0; k < nbc; k++){
sum += a[i*nac+k] * b[j*nbc+k];
}
result[(*col)*i+j] = (sum)*value;
}
}
return 1;
}else{
printf("\n \n Error in multmatrix_transpose: nac and nbc do not match!!!! ..\n\n");
}
return 0;
}
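/*
	Kernel variant of the transpose product: matrix a is staged in dynamic shared
	memory and result[i][j] = (sum_k a[i*nac+k] * a[j*nbc+k]) * value is computed.
	Note that only the shared copy of a is read, so the kernel effectively assumes
	b aliases a (i.e. it computes a * a^T scaled by value).
*/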
__global__ void d_multmatrix_transpose(const REAL * __restrict__ a,int naf,int nac,const REAL * __restrict__ b,int nbf,int nbc,REAL * __restrict__ result,const REAL value){
int i = blockIdx.x * blockDim.x + threadIdx.x; // row
int j = blockIdx.x * blockDim.x + threadIdx.y; // col
int k;
REAL sum=0;
extern __shared__ REAL d_a[];
//#pragma unroll
//for ( k = 0; k < nbc; k++){
//#pragma unroll
for ( k = j; k < nbc; k=k+(nbc/nbf)){
d_a[i*nbc+k] = a[i*nbc+k];
}
__syncthreads();
#pragma unroll
for ( k = 0; k < nbc; k++){
sum += (d_a[i*nac+k] * d_a[j*nbc+k]);
}
result[(nbf)*i+j] = (sum)*value;
}
|
820d40a8ec74449ec67ee58ac7e8807c3b878c59.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_vecDouble.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
int *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
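// hipFree(0) forces runtime/context initialisation; then one synchronised launch,
// ten untimed warm-up launches, and finally 1000 timed launches. Note that there is
// no device synchronisation inside the timed region, so the measurement largely
// reflects kernel launch overhead.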
hipFree(0);
hipLaunchKernelGGL((kernel_vecDouble), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kernel_vecDouble), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kernel_vecDouble), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 820d40a8ec74449ec67ee58ac7e8807c3b878c59.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_vecDouble.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
int *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
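// cudaFree(0) forces runtime/context initialisation; then one synchronised launch,
// ten untimed warm-up launches, and finally 1000 timed launches. Note that there is
// no device synchronisation inside the timed region, so the measurement largely
// reflects kernel launch overhead.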
cudaFree(0);
kernel_vecDouble<<<gridBlock,threadBlock>>>(in,out,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_vecDouble<<<gridBlock,threadBlock>>>(in,out,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_vecDouble<<<gridBlock,threadBlock>>>(in,out,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
45baf5dff67d19215a657f2b5351865aa667c18d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define PIx2 6.2831853071795864769252867665590058f
__global__ void ComputePhiMagGPU(int numK, const float* phiR, const float* phiI, float* phiMag){
/********************************************************************
*
* Compute the magnitude of Fourier Transform at each sample point
*
********************************************************************/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// INSERT KERNEL CODE HERE
if(tid<numK){
float real = phiR[tid];
float imag = phiI[tid];
phiMag[tid] = real*real + imag*imag;
}
}
void basicComputePhiMagGPU(int numK, float* phiR, float* phiI, float* phiMag){
// Initialize thread block and kernel grid dimensions
const unsigned int BLOCK_SIZE = 1024;
dim3 DimGrid((numK-1)/BLOCK_SIZE + 1,1,1);
dim3 DimBlock(BLOCK_SIZE,1,1);
// Call the kernel for calculating magnitude of Phi
hipLaunchKernelGGL(( ComputePhiMagGPU), dim3(DimGrid),dim3(DimBlock), 0, 0, numK, phiR, phiI, phiMag);
}
__global__ void ComputeQGPU(int numK, int numX, const struct kValues* kVals, const float* x, const float* y, const float* z,float* Qr, float* Qi){
/********************************************************************
*
* Calculate Q at each voxel point
*
********************************************************************/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// INSERT KERNEL CODE HERE
if(tid<numX){
Qr[tid] = 0; Qi[tid] = 0;
for(int m = 0; m < numK; m++){
float exp = PIx2 * (x[tid] * kVals[m].Kx + y[tid] * kVals[m].Ky + z[tid] * kVals[m].Kz);
Qr[tid] += kVals[m].PhiMag * cos(exp);
Qi[tid] += kVals[m].PhiMag * sin(exp);
}
}
}
void basicComputeQGpu(int numK, int numX, struct kValues* kVals, float* x, float* y, float* z,float* Qr, float* Qi){
// Initialize thread block and kernel grid dimensions
const unsigned int BLOCK_SIZE = 1024;
dim3 DimGrid((numX-1)/BLOCK_SIZE + 1,1,1);
dim3 DimBlock(BLOCK_SIZE,1,1);
// Call the kernel for calculating Q matrix
hipLaunchKernelGGL(( ComputeQGPU), dim3(DimGrid),dim3(DimBlock), 0, 0, numK, numX, kVals, x, y, z, Qr, Qi);
}
| 45baf5dff67d19215a657f2b5351865aa667c18d.cu | /******************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define PIx2 6.2831853071795864769252867665590058f
__global__ void ComputePhiMagGPU(int numK, const float* phiR, const float* phiI, float* phiMag){
/********************************************************************
*
* Compute the magnitude of Fourier Transform at each sample point
*
********************************************************************/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// INSERT KERNEL CODE HERE
if(tid<numK){
float real = phiR[tid];
float imag = phiI[tid];
phiMag[tid] = real*real + imag*imag;
}
}
void basicComputePhiMagGPU(int numK, float* phiR, float* phiI, float* phiMag){
// Initialize thread block and kernel grid dimensions
const unsigned int BLOCK_SIZE = 1024;
dim3 DimGrid((numK-1)/BLOCK_SIZE + 1,1,1);
dim3 DimBlock(BLOCK_SIZE,1,1);
// Call the kernel for calculating magnitude of Phi
ComputePhiMagGPU<<<DimGrid,DimBlock>>>(numK, phiR, phiI, phiMag);
}
__global__ void ComputeQGPU(int numK, int numX, const struct kValues* kVals, const float* x, const float* y, const float* z,float* Qr, float* Qi){
/********************************************************************
*
* Calculate Q at each voxel point
*
********************************************************************/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// INSERT KERNEL CODE HERE
if(tid<numX){
Qr[tid] = 0; Qi[tid] = 0;
for(int m = 0; m < numK; m++){
float exp = PIx2 * (x[tid] * kVals[m].Kx + y[tid] * kVals[m].Ky + z[tid] * kVals[m].Kz);
Qr[tid] += kVals[m].PhiMag * cos(exp);
Qi[tid] += kVals[m].PhiMag * sin(exp);
}
}
}
void basicComputeQGpu(int numK, int numX, struct kValues* kVals, float* x, float* y, float* z,float* Qr, float* Qi){
// Initialize thread block and kernel grid dimensions
const unsigned int BLOCK_SIZE = 1024;
dim3 DimGrid((numX-1)/BLOCK_SIZE + 1,1,1);
dim3 DimBlock(BLOCK_SIZE,1,1);
// Call the kernel for calculating Q matrix
ComputeQGPU<<<DimGrid,DimBlock>>>(numK, numX, kVals, x, y, z, Qr, Qi);
}
|
02de4d74b3abf9169a262201a975b5db2a777ce4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <algorithm>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/where_index_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/for_range.h"
namespace paddle {
namespace operators {
using CUDADeviceContext = paddle::platform::CUDADeviceContext;
template <typename T>
__global__ void GetTrueNum(const T *cond_data, const int64_t numel,
int64_t *true_num_array) {
const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t idx = tid; idx < numel; idx += gridDim.x * blockDim.x) {
true_num_array[idx] =
static_cast<int64_t>(static_cast<bool>(cond_data[idx]));
}
}
template <typename T>
__global__ void SetTrueIndex(int64_t *out_ptr, const T *cond_data,
const int64_t numel, const int64_t *stride_array,
const int64_t rank,
const int64_t *true_num_array) {
const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t idx = tid; idx < numel; idx += gridDim.x * blockDim.x) {
// true_num_array is calculated by cub::InclusiveSum,
// cause the first element of true_num_array is 1,
// so we need substract 1 to get true index.
const int64_t true_index = true_num_array[idx] - 1;
if (static_cast<bool>(cond_data[idx])) {
int64_t rank_index = idx;
for (int j = 0; j < rank; j++) {
const int64_t out_index = rank_index / stride_array[j];
out_ptr[true_index * rank + j] = out_index;
rank_index -= out_index * stride_array[j];
}
}
}
}
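// where_index (a "nonzero"-style op): for every true element of Condition, emit its
// rank-dimensional coordinates; the output tensor has shape
// [number_of_true_elements, rank].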
template <typename T>
class CUDAWhereIndexKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *condition = context.Input<framework::Tensor>("Condition");
auto *out = context.Output<framework::Tensor>("Out");
auto &dev_ctx = context.template device_context<CUDADeviceContext>();
const T *cond_data = condition->data<T>();
const int64_t numel = condition->numel();
auto dims = condition->dims();
const int rank = dims.size();
auto d_array_mem = memory::Alloc(dev_ctx, (numel + rank) * sizeof(int64_t));
auto h_array_mem =
memory::Alloc(platform::CPUPlace(), (rank + 1) * sizeof(int64_t));
// "stride_array" is an array and len(stride_array)==rank,
// each element is the stride of each dimension -- the length from i to i+1.
int64_t *h_stride_array = reinterpret_cast<int64_t *>(h_array_mem->ptr());
int64_t *d_stride_array = reinterpret_cast<int64_t *>(d_array_mem->ptr());
// "true_num_array" is an array and len(stride_array)==numel,
// at the beginning,
// "true_num_array" will set 1 if condition[i] == true else 0,
// then it will be calculated by cub::InclusiveSum,
// so that we can get the true number before i as the out index
int64_t *d_true_num_array = d_stride_array + rank;
// the total_true_num is the total number of condition[i] == true
int64_t *h_total_true_num = h_stride_array + rank;
// allocate cub memory
size_t cub_size = 0;
hipcub::DeviceScan::InclusiveSum(nullptr, cub_size, d_true_num_array,
d_true_num_array, numel, dev_ctx.stream());
auto cub_mem = memory::Alloc(dev_ctx, cub_size * sizeof(int64_t));
void *cub_data = cub_mem->ptr();
// set d_true_num_array[i]=1 if cond_data[i]==true else 0
    const int threads = std::min(numel, static_cast<int64_t>(128));
    const int64_t need_grids = (numel + threads - 1) / threads;
    const int grids = std::min(need_grids, static_cast<int64_t>(256));
hipLaunchKernelGGL(( GetTrueNum<T>), dim3(grids), dim3(threads), 0, dev_ctx.stream(), cond_data, numel,
d_true_num_array);
// calculate the inclusive prefix sum of "true_num_array"
// to get the index of "out" tensor,
// and the total number of cond_data[i]==true.
// Example:
// condition: F T T F F F T T
// before: 0 1 1 0 0 0 1 1
// after: 0 1 2 2 2 2 3 4
// out: 1 2 6 7
hipcub::DeviceScan::InclusiveSum(cub_data, cub_size, d_true_num_array,
d_true_num_array, numel, dev_ctx.stream());
// calculate each dimension's stride
h_stride_array[rank - 1] = 1;
for (int i = rank - 2; i >= 0; i--) {
h_stride_array[i] = h_stride_array[i + 1] * dims[i + 1];
}
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
d_stride_array, platform::CPUPlace(), h_stride_array,
rank * sizeof(int64_t), dev_ctx.stream());
// get total true number and set output size
// the last element of cub::InclusiveSum is the total number
memory::Copy(platform::CPUPlace(), h_total_true_num,
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
d_true_num_array + numel - 1, sizeof(int64_t),
dev_ctx.stream());
dev_ctx.Wait();
int64_t true_num = *h_total_true_num;
out->Resize(framework::make_ddim({static_cast<int64_t>(true_num), rank}));
auto out_data = out->mutable_data<int64_t>(context.GetPlace());
if (true_num == 0) {
return;
}
// using true_num_array and stride_array to calculate the output index
hipLaunchKernelGGL(( SetTrueIndex<T>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
out_data, cond_data, numel, d_stride_array, rank, d_true_num_array);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(where_index, ops::CUDAWhereIndexKernel<int64_t>,
ops::CUDAWhereIndexKernel<int>,
ops::CUDAWhereIndexKernel<bool>,
ops::CUDAWhereIndexKernel<float>,
ops::CUDAWhereIndexKernel<double>);
| 02de4d74b3abf9169a262201a975b5db2a777ce4.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include <algorithm>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/where_index_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/for_range.h"
namespace paddle {
namespace operators {
using CUDADeviceContext = paddle::platform::CUDADeviceContext;
template <typename T>
__global__ void GetTrueNum(const T *cond_data, const int64_t numel,
int64_t *true_num_array) {
const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t idx = tid; idx < numel; idx += gridDim.x * blockDim.x) {
true_num_array[idx] =
static_cast<int64_t>(static_cast<bool>(cond_data[idx]));
}
}
template <typename T>
__global__ void SetTrueIndex(int64_t *out_ptr, const T *cond_data,
const int64_t numel, const int64_t *stride_array,
const int64_t rank,
const int64_t *true_num_array) {
const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int64_t idx = tid; idx < numel; idx += gridDim.x * blockDim.x) {
// true_num_array is calculated by cub::InclusiveSum,
// cause the first element of true_num_array is 1,
// so we need substract 1 to get true index.
const int64_t true_index = true_num_array[idx] - 1;
if (static_cast<bool>(cond_data[idx])) {
int64_t rank_index = idx;
for (int j = 0; j < rank; j++) {
const int64_t out_index = rank_index / stride_array[j];
out_ptr[true_index * rank + j] = out_index;
rank_index -= out_index * stride_array[j];
}
}
}
}
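// where_index (a "nonzero"-style op): for every true element of Condition, emit its
// rank-dimensional coordinates; the output tensor has shape
// [number_of_true_elements, rank].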
template <typename T>
class CUDAWhereIndexKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *condition = context.Input<framework::Tensor>("Condition");
auto *out = context.Output<framework::Tensor>("Out");
auto &dev_ctx = context.template device_context<CUDADeviceContext>();
const T *cond_data = condition->data<T>();
const int64_t numel = condition->numel();
auto dims = condition->dims();
const int rank = dims.size();
auto d_array_mem = memory::Alloc(dev_ctx, (numel + rank) * sizeof(int64_t));
auto h_array_mem =
memory::Alloc(platform::CPUPlace(), (rank + 1) * sizeof(int64_t));
// "stride_array" is an array and len(stride_array)==rank,
// each element is the stride of each dimension -- the length from i to i+1.
int64_t *h_stride_array = reinterpret_cast<int64_t *>(h_array_mem->ptr());
int64_t *d_stride_array = reinterpret_cast<int64_t *>(d_array_mem->ptr());
// "true_num_array" is an array and len(stride_array)==numel,
// at the beginning,
// "true_num_array" will set 1 if condition[i] == true else 0,
// then it will be calculated by cub::InclusiveSum,
// so that we can get the true number before i as the out index
int64_t *d_true_num_array = d_stride_array + rank;
// the total_true_num is the total number of condition[i] == true
int64_t *h_total_true_num = h_stride_array + rank;
// allocate cub memory
size_t cub_size = 0;
cub::DeviceScan::InclusiveSum(nullptr, cub_size, d_true_num_array,
d_true_num_array, numel, dev_ctx.stream());
auto cub_mem = memory::Alloc(dev_ctx, cub_size * sizeof(int64_t));
void *cub_data = cub_mem->ptr();
// set d_true_num_array[i]=1 if cond_data[i]==true else 0
const int threads = std::min(numel, static_cast<int64_t>(128));
const int64_t need_grids = (numel + threads - 1) / threads;
const int grids = std::min(need_grids, static_cast<int64_t>(256));
GetTrueNum<T><<<grids, threads, 0, dev_ctx.stream()>>>(cond_data, numel,
d_true_num_array);
// calculate the inclusive prefix sum of "true_num_array"
// to get the index of "out" tensor,
// and the total number of cond_data[i]==true.
// Example:
// condition: F T T F F F T T
// before: 0 1 1 0 0 0 1 1
// after: 0 1 2 2 2 2 3 4
// out: 1 2 6 7
cub::DeviceScan::InclusiveSum(cub_data, cub_size, d_true_num_array,
d_true_num_array, numel, dev_ctx.stream());
// calculate each dimension's stride
h_stride_array[rank - 1] = 1;
for (int i = rank - 2; i >= 0; i--) {
h_stride_array[i] = h_stride_array[i + 1] * dims[i + 1];
}
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
d_stride_array, platform::CPUPlace(), h_stride_array,
rank * sizeof(int64_t), dev_ctx.stream());
// get total true number and set output size
// the last element of cub::InclusiveSum is the total number
memory::Copy(platform::CPUPlace(), h_total_true_num,
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
d_true_num_array + numel - 1, sizeof(int64_t),
dev_ctx.stream());
dev_ctx.Wait();
int64_t true_num = *h_total_true_num;
out->Resize(framework::make_ddim({static_cast<int64_t>(true_num), rank}));
auto out_data = out->mutable_data<int64_t>(context.GetPlace());
if (true_num == 0) {
return;
}
// using true_num_array and stride_array to calculate the output index
SetTrueIndex<T><<<grids, threads, 0, dev_ctx.stream()>>>(
out_data, cond_data, numel, d_stride_array, rank, d_true_num_array);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(where_index, ops::CUDAWhereIndexKernel<int64_t>,
ops::CUDAWhereIndexKernel<int>,
ops::CUDAWhereIndexKernel<bool>,
ops::CUDAWhereIndexKernel<float>,
ops::CUDAWhereIndexKernel<double>);
|
78c69e6ac5967a12aa9d27de999774c21617d7ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
sergeim19
April 27, 2015
Burgers equation - GPU CUDA version
*/
#define NADVANCE (4000)
#define nu (5.0e-2)
__global__ void kernel_rescale_u(double *u_dev, int N)
{
int j;
j = blockIdx.x * blockDim.x + threadIdx.x;
u_dev[j] = u_dev[j] / (double)N;
} | 78c69e6ac5967a12aa9d27de999774c21617d7ed.cu | #include "includes.h"
/*
sergeim19
April 27, 2015
Burgers equation - GPU CUDA version
*/
#define NADVANCE (4000)
#define nu (5.0e-2)
__global__ void kernel_rescale_u(double *u_dev, int N)
{
int j;
j = blockIdx.x * blockDim.x + threadIdx.x;
u_dev[j] = u_dev[j] / (double)N;
} |
86e3c8a6bfbeef90066ee1163fe01a65d028b785.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* http://github.com/dusty-nv
*/
#include "cudaRGB.h"
//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBAf(uchar3* srcImage,
float4* dstImage,
uint32_t width, uint32_t height)
{
int x, y, pixel;
x = (blockIdx.x * blockDim.x) + threadIdx.x;
y = (blockIdx.y * blockDim.y) + threadIdx.y;
pixel = y * width + x;
if (x >= width)
return;
if (y >= height)
return;
// printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);
const float s = 1.0f;
const uchar3 px = srcImage[pixel];
dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s);
}
hipError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height )
{
if( !srcDev || !destDev )
return hipErrorInvalidDevicePointer;
const dim3 blockDim(8,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1);
hipLaunchKernelGGL(( RGBToRGBAf), dim3(gridDim), dim3(blockDim), 0, 0, srcDev, destDev, width, height );
return CUDA(hipGetLastError());
}
| 86e3c8a6bfbeef90066ee1163fe01a65d028b785.cu | /*
* http://github.com/dusty-nv
*/
#include "cudaRGB.h"
//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBAf(uchar3* srcImage,
float4* dstImage,
uint32_t width, uint32_t height)
{
int x, y, pixel;
x = (blockIdx.x * blockDim.x) + threadIdx.x;
y = (blockIdx.y * blockDim.y) + threadIdx.y;
pixel = y * width + x;
if (x >= width)
return;
if (y >= height)
return;
// printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);
const float s = 1.0f;
const uchar3 px = srcImage[pixel];
dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s);
}
cudaError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height )
{
if( !srcDev || !destDev )
return cudaErrorInvalidDevicePointer;
const dim3 blockDim(8,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1);
RGBToRGBAf<<<gridDim, blockDim>>>( srcDev, destDev, width, height );
return CUDA(cudaGetLastError());
}
|
2c0cf8af714edd5ee2b95cbec754c7c59dd4e6d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/attention_random_conv_layer.hpp"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
namespace caffe {
template <typename Dtype>
__global__ void inspect_random_kernel(const int n, Dtype* a) {
CUDA_KERNEL_LOOP(index, n) {
printf("%f\n",a[index] );
}
}
template <typename Dtype>
__global__ void add_number_kernel(const int n, const Dtype* a, const Dtype b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b;
}
}
template <typename Dtype>
__global__ void mul_number_kernel(const int n, const Dtype* a, const Dtype b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b;
}
}
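// Forward pass: im2col the input, mask the column buffer element-wise with the
// attention map (bottom[1]), scale it by uniform random values drawn from
// [mini, maxi], and then run the usual convolution GEMM (plus bias, if any).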
template <typename Dtype>
void AttentionRandomConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_attention = bottom[1]->gpu_data();
Dtype* random = random_.mutable_gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
//printf("forward attention conv layer 0\n");
//srand((unsigned int)time(NULL));
for (int n = 0; n < this->num_; ++n) {
const Dtype* input = bottom_data + n * this->bottom_dim_;
const Dtype* att_input = bottom_attention + n * kernel_dim_ * height_ * width_;
Dtype* random_input = random + n * kernel_dim_ * height_ * width_;
// printf("forward attention conv layer 1\n");
conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
// printf("forward attention conv layer 2\n");
caffe_gpu_mul( kernel_dim_ * height_ * width_,col_buffer_.gpu_data(),att_input, attention_col_buffer_.mutable_gpu_data());
// printf("forward attention conv layer 3\n");
//printf("lilac!!\n");
float *dataDev;
int number = kernel_dim_ * height_ * width_;
caffe_gpu_rng_uniform(kernel_dim_ * height_ * width_,mini, maxi,random_input);
//printf("lilac!!\n");
//inspect_random_kernel<<<CAFFE_GET_BLOCKS(10), CAFFE_CUDA_NUM_THREADS>>>(10,random_input);
//printf("\n");
//mul_number_kernel<Dtype><<<CAFFE_GET_BLOCKS(kernel_dim_ * height_ * width_), CAFFE_CUDA_NUM_THREADS>>>(kernel_dim_ * height_ * width_, random_input, maxi - mini, random_input);
//add_number_kernel<Dtype><<<CAFFE_GET_BLOCKS(kernel_dim_ * height_ * width_), CAFFE_CUDA_NUM_THREADS>>>(kernel_dim_ * height_ * width_, random_input, mini, random_input);
caffe_gpu_mul(kernel_dim_ * height_ * width_ ,attention_col_buffer_.gpu_data(), random_input, attention_col_buffer_.mutable_gpu_data());
const Dtype* att_col_buff = attention_col_buffer_.gpu_data();
// printf("forward attention conv layer 4\n");
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /* N' */, conv_out_spatial_dim_/* H' * W' */ , kernel_dim_/* C * h * w */,
(Dtype)1., weight /* C' * C * h * w */, att_col_buff /* C * h * w * H' * W' */, (Dtype)0., top_data + n * this->top_dim_); // C' * H' * W'
// printf("forward attention conv layer 5\n");
if (this->bias_term_) {
// printf("forward attention conv layer 5.5\n");
const Dtype* bias = this->blobs_[1]->gpu_data();
// printf("forward attention conv layer 6\n");
this->forward_gpu_bias(top_data + n * this->top_dim_, bias);
// printf("forward attention conv layer 7\n");
}
}
}
template <typename Dtype>
void AttentionRandomConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_attention = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_att_diff = bottom[1]->mutable_gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
}
}
if (this->param_propagate_down_[0] || propagate_down[0]) {
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
const Dtype* att_input = bottom_attention + n * kernel_dim_ * height_ * width_;
if (this->param_propagate_down_[0]) {
const Dtype* input = bottom_data + n * this->bottom_dim_;
conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
const Dtype* col_buff = col_buffer_.gpu_data();
caffe_gpu_mul(kernel_dim_ * height_ * width_,col_buff,att_input, attention_col_buffer_.mutable_gpu_data());
caffe_gpu_mul( kernel_dim_ * height_ * width_,random_.gpu_data(),attention_col_buffer_.gpu_data(), attention_col_buffer_.mutable_gpu_data());
const Dtype* att_col_buff = attention_col_buffer_.gpu_data();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ /* N' */, kernel_dim_ /* C * h * w */ , conv_out_spatial_dim_/* H' * W' */,
(Dtype)1., top_diff + n * this->top_dim_ , att_col_buff, (Dtype)1., weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[0]) {
Dtype * att_col_diff_buff = attention_col_diff_buff_.mutable_gpu_data();
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_, conv_out_spatial_dim_, conv_out_channels_, (Dtype)1., weight , top_diff + n * this->top_dim_ , (Dtype)0., att_col_diff_buff );
caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,random_.gpu_data(), attention_col_diff_buff_.mutable_gpu_data());
caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,att_input, col_diff_buffer_.mutable_gpu_data());
conv_col2im_gpu(col_diff_buffer_.gpu_data(), bottom_diff + n * this->bottom_dim_);
caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,col_buffer_.gpu_data(),bottom_att_diff + n * kernel_dim_ * height_ * width_);
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(AttentionRandomConvolutionLayer);
} // namespace caffe
| 2c0cf8af714edd5ee2b95cbec754c7c59dd4e6d0.cu | #include <vector>
#include "caffe/layers/attention_random_conv_layer.hpp"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
namespace caffe {
template <typename Dtype>
__global__ void inspect_random_kernel(const int n, Dtype* a) {
CUDA_KERNEL_LOOP(index, n) {
printf("%f\n",a[index] );
}
}
template <typename Dtype>
__global__ void add_number_kernel(const int n, const Dtype* a, const Dtype b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b;
}
}
template <typename Dtype>
__global__ void mul_number_kernel(const int n, const Dtype* a, const Dtype b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b;
}
}
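// Forward pass: im2col the input, mask the column buffer element-wise with the
// attention map (bottom[1]), scale it by uniform random values drawn from
// [mini, maxi], and then run the usual convolution GEMM (plus bias, if any).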
template <typename Dtype>
void AttentionRandomConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_attention = bottom[1]->gpu_data();
Dtype* random = random_.mutable_gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
//printf("forward attention conv layer 0\n");
//srand((unsigned int)time(NULL));
for (int n = 0; n < this->num_; ++n) {
const Dtype* input = bottom_data + n * this->bottom_dim_;
const Dtype* att_input = bottom_attention + n * kernel_dim_ * height_ * width_;
Dtype* random_input = random + n * kernel_dim_ * height_ * width_;
// printf("forward attention conv layer 1\n");
conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
// printf("forward attention conv layer 2\n");
caffe_gpu_mul( kernel_dim_ * height_ * width_,col_buffer_.gpu_data(),att_input, attention_col_buffer_.mutable_gpu_data());
// printf("forward attention conv layer 3\n");
//printf("lilac!!\n");
float *dataDev;
int number = kernel_dim_ * height_ * width_;
caffe_gpu_rng_uniform(kernel_dim_ * height_ * width_,mini, maxi,random_input);
//printf("lilac!!\n");
//inspect_random_kernel<<<CAFFE_GET_BLOCKS(10), CAFFE_CUDA_NUM_THREADS>>>(10,random_input);
//printf("\n");
//mul_number_kernel<Dtype><<<CAFFE_GET_BLOCKS(kernel_dim_ * height_ * width_), CAFFE_CUDA_NUM_THREADS>>>(kernel_dim_ * height_ * width_, random_input, maxi - mini, random_input);
//add_number_kernel<Dtype><<<CAFFE_GET_BLOCKS(kernel_dim_ * height_ * width_), CAFFE_CUDA_NUM_THREADS>>>(kernel_dim_ * height_ * width_, random_input, mini, random_input);
caffe_gpu_mul(kernel_dim_ * height_ * width_ ,attention_col_buffer_.gpu_data(), random_input, attention_col_buffer_.mutable_gpu_data());
const Dtype* att_col_buff = attention_col_buffer_.gpu_data();
// printf("forward attention conv layer 4\n");
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /* N' */, conv_out_spatial_dim_/* H' * W' */ , kernel_dim_/* C * h * w */,
(Dtype)1., weight /* C' * C * h * w */, att_col_buff /* C * h * w * H' * W' */, (Dtype)0., top_data + n * this->top_dim_); // C' * H' * W'
// printf("forward attention conv layer 5\n");
if (this->bias_term_) {
// printf("forward attention conv layer 5.5\n");
const Dtype* bias = this->blobs_[1]->gpu_data();
// printf("forward attention conv layer 6\n");
this->forward_gpu_bias(top_data + n * this->top_dim_, bias);
// printf("forward attention conv layer 7\n");
}
}
}
template <typename Dtype>
void AttentionRandomConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_attention = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_att_diff = bottom[1]->mutable_gpu_diff();
// Bias gradient, if necessary.
if (this->bias_term_ && this->param_propagate_down_[1]) {
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
for (int n = 0; n < this->num_; ++n) {
this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_);
}
}
if (this->param_propagate_down_[0] || propagate_down[0]) {
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
const Dtype* att_input = bottom_attention + n * kernel_dim_ * height_ * width_;
if (this->param_propagate_down_[0]) {
const Dtype* input = bottom_data + n * this->bottom_dim_;
conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
const Dtype* col_buff = col_buffer_.gpu_data();
caffe_gpu_mul(kernel_dim_ * height_ * width_,col_buff,att_input, attention_col_buffer_.mutable_gpu_data());
caffe_gpu_mul( kernel_dim_ * height_ * width_,random_.gpu_data(),attention_col_buffer_.gpu_data(), attention_col_buffer_.mutable_gpu_data());
const Dtype* att_col_buff = attention_col_buffer_.gpu_data();
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, conv_out_channels_ /* N' */, kernel_dim_ /* C * h * w */ , conv_out_spatial_dim_/* H' * W' */,
(Dtype)1., top_diff + n * this->top_dim_ , att_col_buff, (Dtype)1., weight_diff);
}
// gradient w.r.t. bottom data, if necessary.
if (propagate_down[0]) {
Dtype * att_col_diff_buff = attention_col_diff_buff_.mutable_gpu_data();
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, kernel_dim_, conv_out_spatial_dim_, conv_out_channels_, (Dtype)1., weight , top_diff + n * this->top_dim_ , (Dtype)0., att_col_diff_buff );
caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,random_.gpu_data(), attention_col_diff_buff_.mutable_gpu_data());
caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,att_input, col_diff_buffer_.mutable_gpu_data());
conv_col2im_gpu(col_diff_buffer_.gpu_data(), bottom_diff + n * this->bottom_dim_);
caffe_gpu_mul(kernel_dim_ * height_ * width_, attention_col_diff_buff_.gpu_data() ,col_buffer_.gpu_data(),bottom_att_diff + n * kernel_dim_ * height_ * width_);
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(AttentionRandomConvolutionLayer);
} // namespace caffe
|
ecec033e9640c54a67b0098f7121f4bae52ea599.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "Material.cuh.cu"
#include "HitRecord.cuh.cu"
#include "Ray.cuh.cu"
#include "utils.cuh.cu"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
namespace RayTracing
{
class Lambertian : public Material
{
protected:
const Texture* const * const m_albedo;
hiprandState_t *m_states;
public:
__host__ __device__
Lambertian(
const float transparency,
const float reflectance,
const Texture* const * const albedo,
hiprandState_t *states
)
: Material(transparency, reflectance),
m_albedo(albedo),
m_states(states)
{}
private:
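        // Rejection sampling: draw points uniformly in the cube [-1,1]^3, keep the
        // first one that falls inside the unit ball, and normalise it onto the
        // unit sphere surface.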
__host__ __device__
Vector3 RandomUnitSphereSurfaceVector() const
{
#ifdef __CUDA_ARCH__
int id = threadIdx.x + blockDim.x * blockIdx.x;
while (true)
{
Vector3 v{
hiprand_uniform(m_states + id) * 2 - 1,
hiprand_uniform(m_states + id) * 2 - 1,
hiprand_uniform(m_states + id) * 2 - 1
};
if (v.LengthSquared() > 1)
continue;
return v.UnitVector();
}
#else
while (true)
{
Vector3 v{
GenRandom(-1, 1),
GenRandom(-1, 1),
GenRandom(-1, 1),
};
if (v.LengthSquared() > 1)
continue;
return v.UnitVector();
}
#endif
}
__host__ __device__
virtual bool Scatter(
const Ray &ray,
const HitRecord &hitRecord,
Color &attenuation,
Ray &scattered
) const override
{
scattered = Ray(
hitRecord.point,
hitRecord.normal + RandomUnitSphereSurfaceVector()
);
if (scattered.direction.NearZero())
scattered.direction = hitRecord.normal;
attenuation = (*m_albedo)->GetColor(hitRecord.u, hitRecord.v);
return true;
}
};
} // namespace RayTracing
| ecec033e9640c54a67b0098f7121f4bae52ea599.cu | #pragma once
#include "Material.cuh.cu"
#include "HitRecord.cuh.cu"
#include "Ray.cuh.cu"
#include "utils.cuh.cu"
#include <curand.h>
#include <curand_kernel.h>
namespace RayTracing
{
class Lambertian : public Material
{
protected:
const Texture* const * const m_albedo;
curandState *m_states;
public:
__host__ __device__
Lambertian(
const float transparency,
const float reflectance,
const Texture* const * const albedo,
curandState *states
)
: Material(transparency, reflectance),
m_albedo(albedo),
m_states(states)
{}
private:
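        // Rejection sampling: draw points uniformly in the cube [-1,1]^3, keep the
        // first one that falls inside the unit ball, and normalise it onto the
        // unit sphere surface.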
__host__ __device__
Vector3 RandomUnitSphereSurfaceVector() const
{
#ifdef __CUDA_ARCH__
int id = threadIdx.x + blockDim.x * blockIdx.x;
while (true)
{
Vector3 v{
curand_uniform(m_states + id) * 2 - 1,
curand_uniform(m_states + id) * 2 - 1,
curand_uniform(m_states + id) * 2 - 1
};
if (v.LengthSquared() > 1)
continue;
return v.UnitVector();
}
#else
while (true)
{
Vector3 v{
GenRandom(-1, 1),
GenRandom(-1, 1),
GenRandom(-1, 1),
};
if (v.LengthSquared() > 1)
continue;
return v.UnitVector();
}
#endif
}
__host__ __device__
virtual bool Scatter(
const Ray &ray,
const HitRecord &hitRecord,
Color &attenuation,
Ray &scattered
) const override
{
scattered = Ray(
hitRecord.point,
hitRecord.normal + RandomUnitSphereSurfaceVector()
);
if (scattered.direction.NearZero())
scattered.direction = hitRecord.normal;
attenuation = (*m_albedo)->GetColor(hitRecord.u, hitRecord.v);
return true;
}
};
} // namespace RayTracing
|
b3b4114792da2ec9b6ed2936783c478a429d0577.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GeantCudaUtils.h"
#include "backend/cuda/Interface.h"
#include "CoprocessorBrokerKernel.h"
#include "GeantTaskData.h"
#include "GeantTrack.h"
namespace Geant {
inline namespace cuda {
template void MakeInstanceArrayAt(GeantTaskData *addr, size_t nElements, size_t sizeOf, size_t, int, GeantPropagator *);
template void MakeInstanceAt(GeantTrack_v *addr, unsigned int, int);
__global__ void Clear(GeantTrack_v *tracks) { tracks->Clear(); }
int Clear_gpu(vecgeom::cxx::DevicePtr<Geant::cuda::GeantTrack_v> &tracks, int blocksPerGrid, int threadsPerBlock,
hipStream_t stream)
{
hipLaunchKernelGGL(( Clear), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, stream, tracks);
GEANT_CUDA_ERROR(hipGetLastError());
return 1;
}
} // cuda
} // Geant
namespace vecgeom {
namespace cxx {
template void DevicePtr<Geant::cuda::GeantConfig>::Construct() const;
template size_t DevicePtr<Geant::cuda::GeantConfig>::SizeOf();
template void DevicePtr<Geant::cuda::GeantPropagator>::Construct(int) const;
template size_t DevicePtr<Geant::cuda::GeantPropagator>::SizeOf();
template size_t DevicePtr<Geant::cuda::GeantTaskData>::SizeOf();
template size_t DevicePtr<Geant::cuda::GeantTrack_v>::SizeOf();
} // cxx
} // vecgeom
| b3b4114792da2ec9b6ed2936783c478a429d0577.cu |
#include "GeantCudaUtils.h"
#include "backend/cuda/Interface.h"
#include "CoprocessorBrokerKernel.h"
#include "GeantTaskData.h"
#include "GeantTrack.h"
namespace Geant {
inline namespace cuda {
template void MakeInstanceArrayAt(GeantTaskData *addr, size_t nElements, size_t sizeOf, size_t, int, GeantPropagator *);
template void MakeInstanceAt(GeantTrack_v *addr, unsigned int, int);
__global__ void Clear(GeantTrack_v *tracks) { tracks->Clear(); }
int Clear_gpu(vecgeom::cxx::DevicePtr<Geant::cuda::GeantTrack_v> &tracks, int blocksPerGrid, int threadsPerBlock,
cudaStream_t stream)
{
Clear<<<blocksPerGrid, threadsPerBlock, 0, stream>>>(tracks);
GEANT_CUDA_ERROR(cudaGetLastError());
return 1;
}
} // cuda
} // Geant
namespace vecgeom {
namespace cxx {
template void DevicePtr<Geant::cuda::GeantConfig>::Construct() const;
template size_t DevicePtr<Geant::cuda::GeantConfig>::SizeOf();
template void DevicePtr<Geant::cuda::GeantPropagator>::Construct(int) const;
template size_t DevicePtr<Geant::cuda::GeantPropagator>::SizeOf();
template size_t DevicePtr<Geant::cuda::GeantTaskData>::SizeOf();
template size_t DevicePtr<Geant::cuda::GeantTrack_v>::SizeOf();
} // cxx
} // vecgeom
|
635788c723cfd74ceb50036404e2609f835a8d77.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
//#include <helper_functions.h>
#include <helper_cuda.h>
#include <ctime>
#include <time.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <hipfft.h>
#include <fstream>
using namespace std;
typedef float2 Complex;
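// Point-wise product of two signals in the frequency domain; together with the
// forward and inverse FFTs below this yields a circular convolution. The division
// appears intended to undo the scaling of the unnormalised hipFFT transforms.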
__global__ void ComplexMUL(Complex *a, Complex *b, int size)
{
    int i = threadIdx.x;
    // Bounds check: the kernel is launched with more threads than the size*size elements.
    if (i >= size * size) return;
    // Use local copies so a[i].y is not computed from the already-updated a[i].x.
    Complex av = a[i], bv = b[i];
    a[i].x = (av.x * bv.x - av.y * bv.y) / (size*size*size*size);
    a[i].y = (av.x * bv.y + av.y * bv.x) / (size*size*size*size);
}
int main()
{
int N = 5;
int SIZE = N*N;
Complex *fg = new Complex[SIZE];
for (int i = 0; i < SIZE; i++){
fg[i].x = 1;
fg[i].y = 0;
}
Complex *fig = new Complex[SIZE];
for (int i = 0; i < SIZE; i++){
fig[i].x = i%2; //
fig[i].y = 0;
}
for (int i = 0; i < 24; i=i+5)
{
cout << fg[i].x << " " << fg[i + 1].x << " " << fg[i + 2].x << " " << fg[i + 3].x << " " << fg[i + 4].x << endl;
}
cout << "----------------" << endl;
for (int i = 0; i < 24; i = i + 5)
{
cout << fig[i].x << " " << fig[i + 1].x << " " << fig[i + 2].x << " " << fig[i + 3].x << " " << fig[i + 4].x << endl;
}
cout << "----------------" << endl;
int mem_size = sizeof(Complex)* SIZE;
hipfftComplex *d_signal;
checkCudaErrors(hipMalloc((void **) &d_signal, mem_size));
checkCudaErrors(hipMemcpy(d_signal, fg, mem_size, hipMemcpyHostToDevice));
hipfftComplex *d_filter_kernel;
checkCudaErrors(hipMalloc((void **)&d_filter_kernel, mem_size));
checkCudaErrors(hipMemcpy(d_filter_kernel, fig, mem_size, hipMemcpyHostToDevice));
// cout << d_signal[1].x << endl;
// CUFFT plan
hipfftHandle plan;
hipfftPlan2d(&plan, N, N, HIPFFT_C2C);
// Transform signal and filter
printf("Transforming signal hipfftExecR2C\n");
hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD);
hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD);
printf("Launching Complex multiplication<<< >>>\n");
hipLaunchKernelGGL(( ComplexMUL) , dim3(32), dim3(256) , 0, 0, d_signal, d_filter_kernel, N);
// Transform signal back
printf("Transforming signal back hipfftExecC2C\n");
hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD);
Complex *result = new Complex[SIZE];
hipMemcpy(result, d_signal, sizeof(Complex)*SIZE, hipMemcpyDeviceToHost);
for (int i = 0; i < SIZE; i=i+5)
{
cout << result[i].x << " " << result[i + 1].x << " " << result[i + 2].x << " " << result[i + 3].x << " " << result[i + 4].x << endl;
}
delete result, fg, fig;
hipfftDestroy(plan);
//hipfftDestroy(plan2);
hipFree(d_signal);
hipFree(d_filter_kernel);
}
| 635788c723cfd74ceb50036404e2609f835a8d77.cu | #include <cuda_runtime.h>
#include "device_launch_parameters.h"
//#include <helper_functions.h>
#include <helper_cuda.h>
#include <ctime>
#include <time.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <cufft.h>
#include <fstream>
using namespace std;
typedef float2 Complex;
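// Pointwise complex product a[i] *= b[i], with a 1/(size^4) scale factor applied.
// Multiplying the two forward FFTs element-wise and then inverse-transforming
// corresponds to a (scaled) circular convolution of the two input grids.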
__global__ void ComplexMUL(Complex *a, Complex *b, int size)
{
    int i = threadIdx.x;
    if (i >= size * size) return;  // the launch may spawn more threads than elements
    float ax = a[i].x;             // keep the original value; a[i].x is overwritten below
    float ay = a[i].y;
    a[i].x = (ax * b[i].x - ay * b[i].y) / (size*size*size*size);
    a[i].y = (ax * b[i].y + ay * b[i].x) / (size*size*size*size);
}
int main()
{
int N = 5;
int SIZE = N*N;
Complex *fg = new Complex[SIZE];
for (int i = 0; i < SIZE; i++){
fg[i].x = 1;
fg[i].y = 0;
}
Complex *fig = new Complex[SIZE];
for (int i = 0; i < SIZE; i++){
fig[i].x = i%2; //
fig[i].y = 0;
}
for (int i = 0; i < 24; i=i+5)
{
cout << fg[i].x << " " << fg[i + 1].x << " " << fg[i + 2].x << " " << fg[i + 3].x << " " << fg[i + 4].x << endl;
}
cout << "----------------" << endl;
for (int i = 0; i < 24; i = i + 5)
{
cout << fig[i].x << " " << fig[i + 1].x << " " << fig[i + 2].x << " " << fig[i + 3].x << " " << fig[i + 4].x << endl;
}
cout << "----------------" << endl;
int mem_size = sizeof(Complex)* SIZE;
cufftComplex *d_signal;
checkCudaErrors(cudaMalloc((void **) &d_signal, mem_size));
checkCudaErrors(cudaMemcpy(d_signal, fg, mem_size, cudaMemcpyHostToDevice));
cufftComplex *d_filter_kernel;
checkCudaErrors(cudaMalloc((void **)&d_filter_kernel, mem_size));
checkCudaErrors(cudaMemcpy(d_filter_kernel, fig, mem_size, cudaMemcpyHostToDevice));
// cout << d_signal[1].x << endl;
// CUFFT plan
cufftHandle plan;
cufftPlan2d(&plan, N, N, CUFFT_C2C);
// Transform signal and filter
printf("Transforming signal cufftExecR2C\n");
cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD);
cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD);
printf("Launching Complex multiplication<<< >>>\n");
ComplexMUL <<< 32, 256 >>>(d_signal, d_filter_kernel, N);
// Transform signal back
printf("Transforming signal back cufftExecC2C\n");
cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE);
Complex *result = new Complex[SIZE];
cudaMemcpy(result, d_signal, sizeof(Complex)*SIZE, cudaMemcpyDeviceToHost);
for (int i = 0; i < SIZE; i=i+5)
{
cout << result[i].x << " " << result[i + 1].x << " " << result[i + 2].x << " " << result[i + 3].x << " " << result[i + 4].x << endl;
}
delete result, fg, fig;
cufftDestroy(plan);
//cufftDestroy(plan2);
cudaFree(d_signal);
cudaFree(d_filter_kernel);
}
|
98ec12c4b6feffbe43ebd19e7088a2fb485ded90.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_FUNC cxx11_tensor_complex_cwise_ops
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
template<typename T>
void test_cuda_complex_cwise_ops() {
const int kNumItems = 2;
std::size_t complex_bytes = kNumItems * sizeof(std::complex<T>);
std::complex<T>* d_in1;
std::complex<T>* d_in2;
std::complex<T>* d_out;
hipMalloc((void**)(&d_in1), complex_bytes);
hipMalloc((void**)(&d_in2), complex_bytes);
hipMalloc((void**)(&d_out), complex_bytes);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in1(
d_in1, kNumItems);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in2(
d_in2, kNumItems);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_out(
d_out, kNumItems);
const std::complex<T> a(3.14f, 2.7f);
const std::complex<T> b(-10.6f, 1.4f);
gpu_in1.device(gpu_device) = gpu_in1.constant(a);
gpu_in2.device(gpu_device) = gpu_in2.constant(b);
enum CwiseOp {
Add = 0,
Sub,
Mul,
Div
};
Tensor<std::complex<T>, 1, 0, int> actual(kNumItems);
for (int op = Add; op <= Div; op++) {
std::complex<T> expected;
switch (static_cast<CwiseOp>(op)) {
case Add:
gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
expected = a + b;
break;
case Sub:
gpu_out.device(gpu_device) = gpu_in1 - gpu_in2;
expected = a - b;
break;
case Mul:
gpu_out.device(gpu_device) = gpu_in1 * gpu_in2;
expected = a * b;
break;
case Div:
gpu_out.device(gpu_device) = gpu_in1 / gpu_in2;
expected = a / b;
break;
}
assert(hipMemcpyAsync(actual.data(), d_out, complex_bytes, hipMemcpyDeviceToHost,
gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
for (int i = 0; i < kNumItems; ++i) {
VERIFY_IS_APPROX(actual(i), expected);
}
}
hipFree(d_in1);
hipFree(d_in2);
hipFree(d_out);
}
void test_cxx11_tensor_complex_cwise_ops()
{
CALL_SUBTEST(test_cuda_complex_cwise_ops<float>());
CALL_SUBTEST(test_cuda_complex_cwise_ops<double>());
}
| 98ec12c4b6feffbe43ebd19e7088a2fb485ded90.cu | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_FUNC cxx11_tensor_complex_cwise_ops
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
template<typename T>
void test_cuda_complex_cwise_ops() {
const int kNumItems = 2;
std::size_t complex_bytes = kNumItems * sizeof(std::complex<T>);
std::complex<T>* d_in1;
std::complex<T>* d_in2;
std::complex<T>* d_out;
cudaMalloc((void**)(&d_in1), complex_bytes);
cudaMalloc((void**)(&d_in2), complex_bytes);
cudaMalloc((void**)(&d_out), complex_bytes);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in1(
d_in1, kNumItems);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in2(
d_in2, kNumItems);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_out(
d_out, kNumItems);
const std::complex<T> a(3.14f, 2.7f);
const std::complex<T> b(-10.6f, 1.4f);
gpu_in1.device(gpu_device) = gpu_in1.constant(a);
gpu_in2.device(gpu_device) = gpu_in2.constant(b);
enum CwiseOp {
Add = 0,
Sub,
Mul,
Div
};
Tensor<std::complex<T>, 1, 0, int> actual(kNumItems);
for (int op = Add; op <= Div; op++) {
std::complex<T> expected;
switch (static_cast<CwiseOp>(op)) {
case Add:
gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
expected = a + b;
break;
case Sub:
gpu_out.device(gpu_device) = gpu_in1 - gpu_in2;
expected = a - b;
break;
case Mul:
gpu_out.device(gpu_device) = gpu_in1 * gpu_in2;
expected = a * b;
break;
case Div:
gpu_out.device(gpu_device) = gpu_in1 / gpu_in2;
expected = a / b;
break;
}
assert(cudaMemcpyAsync(actual.data(), d_out, complex_bytes, cudaMemcpyDeviceToHost,
gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (int i = 0; i < kNumItems; ++i) {
VERIFY_IS_APPROX(actual(i), expected);
}
}
cudaFree(d_in1);
cudaFree(d_in2);
cudaFree(d_out);
}
void test_cxx11_tensor_complex_cwise_ops()
{
CALL_SUBTEST(test_cuda_complex_cwise_ops<float>());
CALL_SUBTEST(test_cuda_complex_cwise_ops<double>());
}
|
260fe58adc0f1dfa3aa1b31921a75845bd488686.hip | // !!! This is a file automatically generated by hipify!!!
//#include<cuda.h>
//#include <hip/hip_runtime.h>
//#include<stdio.h>
////int main(void) {
//// const int Width = 5;
//// float M[Width*Width], N[Width*Width], P[Width*Width];
//// for(int i = 0; i < (Width*Width) ; i++) {
//// M[i] = 5;
//// N[i] = 5;
//// P[i] = 0;
//// }
//// MatrixMultiplication(M, N, P, Width);
//// for(int i = 0; i < (Width*Width) ; i++) {
//// printf("%d \n", P[i]);
//// }
//// int quit;
//// scanf("%d",&quit);
//// return 0;
////}
//Matrix multiplication kernel - thread specification
__global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width) {
//2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
//Pvalue stores the Pd element that is computed by the thread
float Pvalue = 0;
for(int k = 0; k < Width ; ++k) {
float Mdelement = Md[ty*Width + k];
float Ndelement = Nd[k*Width + tx];
Pvalue += (Mdelement*Ndelement);
}
Pd[ty*Width + tx] = Pvalue;
}
void MatrixMultiplication(float *M, float *N, float *P, int Width) {
int size = Width*Width*sizeof(float);
float *Md, *Nd, *Pd;
//Transfer M and N to device memory
hipMalloc((void**)&Md, size);
hipMemcpy(Md,M,size,hipMemcpyHostToDevice);
hipMalloc((void**)&Nd, size);
hipMemcpy(Nd,N,size,hipMemcpyHostToDevice);
//Allocate P on the device
hipMalloc((void**)&Pd,size);
//Setup the execution configuration
dim3 dimBlock(Width,Width);
dim3 dimGrid(1,1);
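    //Note: the whole product runs in a single Width x Width thread block,
    //so this only works for Width <= 32 (at most 1024 threads per block).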
//Launch the device computation threads!
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md,Nd,Pd,Width);
//Transfer P from device to host
hipMemcpy(P,Pd,size,hipMemcpyDeviceToHost);
//Free device matrices
hipFree(Md);
hipFree(Nd);
hipFree(Pd);
}
| 260fe58adc0f1dfa3aa1b31921a75845bd488686.cu | //#include<cuda.h>
//#include <cuda_runtime.h>
//#include<stdio.h>
////int main(void) {
//// const int Width = 5;
//// float M[Width*Width], N[Width*Width], P[Width*Width];
//// for(int i = 0; i < (Width*Width) ; i++) {
//// M[i] = 5;
//// N[i] = 5;
//// P[i] = 0;
//// }
//// MatrixMultiplication(M, N, P, Width);
//// for(int i = 0; i < (Width*Width) ; i++) {
//// printf("%d \n", P[i]);
//// }
//// int quit;
//// scanf("%d",&quit);
//// return 0;
////}
//Matrix multiplication kernel - thread specification
__global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width) {
//2D Thread ID
int tx = threadIdx.x;
int ty = threadIdx.y;
//Pvalue stores the Pd element that is computed by the thread
float Pvalue = 0;
for(int k = 0; k < Width ; ++k) {
float Mdelement = Md[ty*Width + k];
float Ndelement = Nd[k*Width + tx];
Pvalue += (Mdelement*Ndelement);
}
Pd[ty*Width + tx] = Pvalue;
}
void MatrixMultiplication(float *M, float *N, float *P, int Width) {
int size = Width*Width*sizeof(float);
float *Md, *Nd, *Pd;
//Transfer M and N to device memory
cudaMalloc((void**)&Md, size);
cudaMemcpy(Md,M,size,cudaMemcpyHostToDevice);
cudaMalloc((void**)&Nd, size);
cudaMemcpy(Nd,N,size,cudaMemcpyHostToDevice);
//Allocate P on the device
cudaMalloc((void**)&Pd,size);
//Setup the execution configuration
dim3 dimBlock(Width,Width);
dim3 dimGrid(1,1);
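    //Note: the whole product runs in a single Width x Width thread block,
    //so this only works for Width <= 32 (at most 1024 threads per block).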
//Launch the device computation threads!
MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,Width);
//Transfer P from device to host
cudaMemcpy(P,Pd,size,cudaMemcpyDeviceToHost);
//Free device matrices
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
}
|
724b76d6c00c4e663529bf71ea32070d61933f65.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cmath>
#include <iostream>
#include <random>
#include <ctime>
#include <limits>
/**
* generate random double with range: @fMin ~ @fMax
*/
double fRand(double fMin, double fMax)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(fMin, fMax);
double a = dis(gen);
return a;
}
/**
* create balls with radius @r, coordinate (@_x, @_y), velocity vector <@v_x, @v_y>
*/
struct Obstacle
{
public:
double _x, _y, v_x, v_y, r;
Obstacle()
{
_x = fRand(-100.0, 100.0);
_y = fRand(-100.0, 100.0);
v_x = fRand(-5.0, 5.0);
v_y = fRand(-5.0, 5.0);
r = 1.0;
}
};
__device__ double infty(void)
{
const unsigned long long ieee754inf = 0x7ff0000000000000;
return __longlong_as_double(ieee754inf);
}
/**
* @n obstacles
 * for each obstacle, return time elapsed when collision starts @t_s and ends @t_e
* stored in @list[]
*/
__global__ void intersectTime_g(int n, Obstacle points[], double list[])
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// process each obstacle
for(int j = index; j < n; j += stride)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a._x * a._x + a._y * a._y);
double t_s = 0;
double t_e = 0;
        //Case 1: object already collides with the scooter
if(d <= 1)
{
t_s = 0;
t_e = infty();
}
        //Case 2: object moves in the opposite direction w.r.t. the scooter
else if(a._x * a.v_x >=0 || a._y * a.v_y >= 0)
{
t_s = infty();
t_e = infty();
} else
{
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
double delta_t = 2 * sqrt((double)3.0) / v;
t_s = (sqrt(d * d - 1.0) / v ) - 0.5 * delta_t;
t_e = t_s + delta_t;
}
//store in list[j]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
//for test output
//printf("GPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n", a._x, a._y, v, t_s, t_e);
}
}
void intersectTime_c(int n, Obstacle points[], double list[])
{
for(int j = 0; j < n; j++)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a._x * a._x + a._y * a._y);
//distance travelled when collision starts @d_s and ends @d_e
double d_s = d - 2.0;
double d_e = d + 2.0;
//velocity @v of obstacle
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
//time elapsed when collision starts @t_s and ends @t_e
double t_s = d_s / v;
double t_e = d_e / v;
//store in list[j]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
// for test output
//printf("CPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n",a._x, a._y, v, t_s, t_e);
}
}
int main()
{
Obstacle * points = new Obstacle[100 * 10];
Obstacle* points_g;
hipMallocManaged(&points_g, 100 * 10 * sizeof(Obstacle));
double* list_g;
hipMallocManaged(&list_g, 100 * 10 * 2 * sizeof(double));
double* list_c = new double[100 * 10 * 2];
//(@n*10) obstacles
for(int n = 0; n < 100; n++)
{
double total_time_c = 0.0;
double total_time_g = 0.0;
for(int s = 0; s < 1000; s++)
{
//create same set of points for both CPU and GPU
for(int i = 0; i < n * 10; i++)
{
points[i] = Obstacle();
}
//GPU
//copy points to GPU
hipMemcpy(points_g, points, n * 10 * sizeof(Obstacle), hipMemcpyHostToDevice);
//initialize list: store 2 time data for each obstacle
//process obstacles
int blockSize = 256;
int numBlocks = (n * 10 + blockSize - 1) / blockSize;
//timing
clock_t time = clock();
hipLaunchKernelGGL(( intersectTime_g), dim3(numBlocks), dim3(blockSize), 0, 0, n * 10, points_g, list_g);
hipDeviceSynchronize();
time = clock() - time;
double elapsed_g = time / (double) CLOCKS_PER_SEC;
total_time_g += elapsed_g;
//housekeeping
//CPU
clock_t e = clock();
intersectTime_c(n * 10, points, list_c);
e = clock() - e;
double elapsed_c = e / (double) CLOCKS_PER_SEC;
total_time_c += elapsed_c;
}
printf("%d GPU: %.8lf s ", (n * 10), total_time_g);
printf("CPU: %.8lf s ", total_time_c);
printf("%.2lf \n", total_time_c / total_time_g);
}
hipFree(points_g);
hipFree(list_g);
delete[] list_c;
}
| 724b76d6c00c4e663529bf71ea32070d61933f65.cu | #include <cuda.h>
#include <cmath>
#include <iostream>
#include <random>
#include <ctime>
#include <limits>
/**
* generate random double with range: @fMin ~ @fMax
*/
double fRand(double fMin, double fMax)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(fMin, fMax);
double a = dis(gen);
return a;
}
/**
* create balls with radius @r, coordinate (@_x, @_y), velocity vector <@v_x, @v_y>
*/
struct Obstacle
{
public:
double _x, _y, v_x, v_y, r;
Obstacle()
{
_x = fRand(-100.0, 100.0);
_y = fRand(-100.0, 100.0);
v_x = fRand(-5.0, 5.0);
v_y = fRand(-5.0, 5.0);
r = 1.0;
}
};
__device__ double infty(void)
{
const unsigned long long ieee754inf = 0x7ff0000000000000;
return __longlong_as_double(ieee754inf);
}
/**
* @n obstacles
 * for each obstacle, return time elapsed when collision starts @t_s and ends @t_e
* stored in @list[]
*/
__global__ void intersectTime_g(int n, Obstacle points[], double list[])
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// process each obstacle
for(int j = index; j < n; j += stride)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a._x * a._x + a._y * a._y);
double t_s = 0;
double t_e = 0;
        //Case 1: object already collides with the scooter
if(d <= 1)
{
t_s = 0;
t_e = infty();
}
//Case 2: object move in opposite dir w.r.t scooter
else if(a._x * a.v_x >=0 || a._y * a.v_y >= 0)
{
t_s = infty();
t_e = infty();
} else
{
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
double delta_t = 2 * sqrt((double)3.0) / v;
t_s = (sqrt(d * d - 1.0) / v ) - 0.5 * delta_t;
t_e = t_s + delta_t;
}
//store in list[j]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
//for test output
//printf("GPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n", a._x, a._y, v, t_s, t_e);
}
}
void intersectTime_c(int n, Obstacle points[], double list[])
{
for(int j = 0; j < n; j++)
{
Obstacle a = points[j];
//distance @d b/w obstacle and scooter
double d = sqrt(a._x * a._x + a._y * a._y);
//distance travelled when collision starts @d_s and ends @d_e
double d_s = d - 2.0;
double d_e = d + 2.0;
//velocity @v of obstacle
double v = sqrt(a.v_x * a.v_x + a.v_y * a.v_y);
//time elapsed when collision starts @t_s and ends @t_e
double t_s = d_s / v;
double t_e = d_e / v;
//store in list[j]
list[2 * j] = t_s;
list[2 * j + 1] = t_e;
// for test output
//printf("CPU: (%.2lf, %.2lf), v = %.3lf, t_s = %.2lf, t_e = %.2lf\n",a._x, a._y, v, t_s, t_e);
}
}
int main()
{
Obstacle * points = new Obstacle[100 * 10];
Obstacle* points_g;
cudaMallocManaged(&points_g, 100 * 10 * sizeof(Obstacle));
double* list_g;
cudaMallocManaged(&list_g, 100 * 10 * 2 * sizeof(double));
double* list_c = new double[100 * 10 * 2];
//(@n*10) obstacles
for(int n = 0; n < 100; n++)
{
double total_time_c = 0.0;
double total_time_g = 0.0;
for(int s = 0; s < 1000; s++)
{
//create same set of points for both CPU and GPU
for(int i = 0; i < n * 10; i++)
{
points[i] = Obstacle();
}
//GPU
//copy points to GPU
cudaMemcpy(points_g, points, n * 10 * sizeof(Obstacle), cudaMemcpyHostToDevice);
//initialize list: store 2 time data for each obstacle
//process obstacles
int blockSize = 256;
int numBlocks = (n * 10 + blockSize - 1) / blockSize;
//timing
clock_t time = clock();
intersectTime_g<<<numBlocks, blockSize>>>(n * 10, points_g, list_g);
cudaDeviceSynchronize();
time = clock() - time;
double elapsed_g = time / (double) CLOCKS_PER_SEC;
total_time_g += elapsed_g;
//housekeeping
//CPU
clock_t e = clock();
intersectTime_c(n * 10, points, list_c);
e = clock() - e;
double elapsed_c = e / (double) CLOCKS_PER_SEC;
total_time_c += elapsed_c;
}
printf("%d GPU: %.8lf s ", (n * 10), total_time_g);
printf("CPU: %.8lf s ", total_time_c);
printf("%.2lf \n", total_time_c / total_time_g);
}
cudaFree(points_g);
cudaFree(list_g);
delete[] list_c;
}
|
6fb5e3d385d379c96449ff1cbd599274ea402d53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <stdio.h>
#define TPB 12
#define ATOMIC 1
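// Odd-even transposition sort: an "even" pass compare-exchanges pairs starting at
// even indices, an "odd" pass handles pairs starting at odd indices. dotLauncher()
// runs n rounds of both passes in a single block, enough to fully sort n elements.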
__global__
void even_sort(int *d_b, const int n){
    int tid = threadIdx.x; // thread indices start at 0
    if(1 == (tid + 1 ) % 2) // even phase: compare-exchange pairs (0,1), (2,3), ...
    {
        if(tid + 1 < n && d_b[tid] > d_b[tid + 1]){
int tmp = d_b[tid];
d_b[tid] = d_b[tid + 1];
d_b[tid + 1] = tmp;
}
}
__syncthreads();
}
__global__
void odd_sort(int *d_b, const int n){
    int tid = threadIdx.x; // thread indices start at 0
    if(0 == (tid + 1 ) % 2) // odd phase: compare-exchange pairs (1,2), (3,4), ...
    {
        if(tid + 1 < n && d_b[tid] > d_b[tid + 1]){
int tmp = d_b[tid];
d_b[tid] = d_b[tid + 1];
d_b[tid + 1] = tmp;
}
}
}
void dotLauncher(int *b, int n){
int *d_b = NULL;
hipMalloc(&d_b, n*sizeof(int));
hipMemcpy(d_b, b, n*sizeof(int), hipMemcpyHostToDevice);
for(int i = 0; i < n; ++i){
hipLaunchKernelGGL(( even_sort), dim3(1), dim3(n), 0, 0, d_b, n);
hipLaunchKernelGGL(( odd_sort), dim3(1), dim3(n), 0, 0, d_b, n);
}
hipMemcpy(b, d_b, n*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_b);
}
| 6fb5e3d385d379c96449ff1cbd599274ea402d53.cu | #include "kernel.h"
#include <stdio.h>
#define TPB 12
#define ATOMIC 1
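// Odd-even transposition sort: an "even" pass compare-exchanges pairs starting at
// even indices, an "odd" pass handles pairs starting at odd indices. dotLauncher()
// runs n rounds of both passes in a single block, enough to fully sort n elements.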
__global__
void even_sort(int *d_b, const int n){
    int tid = threadIdx.x; // thread indices start at 0
    if(1 == (tid + 1 ) % 2) // even phase: compare-exchange pairs (0,1), (2,3), ...
    {
        if(tid + 1 < n && d_b[tid] > d_b[tid + 1]){
int tmp = d_b[tid];
d_b[tid] = d_b[tid + 1];
d_b[tid + 1] = tmp;
}
}
__syncthreads();
}
__global__
void odd_sort(int *d_b, const int n){
int tid = threadIdx.x;//线程从0开始编号
if(0 == (tid + 1 ) % 2)//第奇数个轮回
{
if(d_b[tid] > d_b[tid + 1] && tid + 1 < n){
int tmp = d_b[tid];
d_b[tid] = d_b[tid + 1];
d_b[tid + 1] = tmp;
}
}
}
void dotLauncher(int *b, int n){
int *d_b = NULL;
cudaMalloc(&d_b, n*sizeof(int));
cudaMemcpy(d_b, b, n*sizeof(int), cudaMemcpyHostToDevice);
for(int i = 0; i < n; ++i){
even_sort<<<1, n, 0>>>(d_b, n);
odd_sort<<<1, n, 0>>>(d_b, n);
}
cudaMemcpy(b, d_b, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_b);
}
|
409219d223d56d705cb2f62f8f2a29b1faabe47a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The implementation of this file is based on gelu plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications: Add (bias) before Gelu is merged into this op to get better performance.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/cuda_call.h"
#include "contrib_ops/cuda/bert/fast_gelu_impl.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// constants for approximating the normal cdf
constexpr float A = 0.5f;
constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI)
constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI)
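// Together these implement the tanh approximation of GELU:
//   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
// i.e. output = in * cdf with cdf = A + A * tanh(in * (C * in * in + B)),
// where `in` is the input with the optional bias already added.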
template <typename T, unsigned TPB>
__global__ void FastGeluKernel(const T a, const T b, const T c, int input_length, int bias_length,
const T* input, const T* bias, T* output) {
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < input_length) {
const T x = input[idx];
const T in = (bias == nullptr) ? x : (T)(x + bias[idx % bias_length]);
const T cdf = a + a * _Tanh(in * (c * in * in + b));
output[idx] = in * cdf;
}
}
template <unsigned TPB>
__global__ void FastGeluKernel2(const half2 a, const half2 b, const half2 c, int input_length, int bias_length,
const half2* input, const half2* bias, half2* output) {
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < input_length) {
const half2 x = input[idx];
const half2 in = (bias == nullptr) ? x : (x + bias[idx % bias_length]);
const half2 cdf = a + a * _Tanh(in * (c * in * in + b));
output[idx] = in * cdf;
}
}
template <>
Status LaunchFastGeluKernel(const hipDeviceProp_t& prop, hipStream_t stream, int input_length, int bias_length,
const float* input, const float* bias, float* output, bool /*use_half2*/) {
constexpr int blockSize = 256;
const int gridSize = (input_length + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( FastGeluKernel<float, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, A, B, C, input_length, bias_length,
input, bias, output);
return CUDA_CALL(hipGetLastError());
}
template <>
Status LaunchFastGeluKernel(const hipDeviceProp_t& prop, hipStream_t stream, int input_length, int bias_length,
const half* input, const half* bias, half* output, bool use_half2) {
constexpr int blockSize = 256;
if (use_half2 && 0 == (bias_length & 1) && prop.major >= 7) {
const int n = input_length / 2;
const int gridSize = (n + blockSize - 1) / blockSize;
const half2 A2 = __floats2half2_rn(A, A);
const half2 B2 = __floats2half2_rn(B, B);
const half2 C2 = __floats2half2_rn(C, C);
const half2* input2 = reinterpret_cast<const half2*>(input);
const half2* bias2 = reinterpret_cast<const half2*>(bias);
half2* output2 = reinterpret_cast<half2*>(output);
hipLaunchKernelGGL(( FastGeluKernel2<blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, A2, B2, C2, n, bias_length / 2,
input2, bias2, output2);
} else {
const int gridSize = (input_length + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( FastGeluKernel<half, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, A, B, C, input_length, bias_length,
input, bias, output);
}
return CUDA_CALL(hipGetLastError());
}
template <>
Status LaunchFastGeluKernel(const hipDeviceProp_t& prop, hipStream_t stream, int input_length, int bias_length,
const BFloat16* input, const BFloat16* bias, BFloat16* output, bool /*use_half2*/) {
constexpr int blockSize = 256;
// remove nv_bfloat162 implementation for now to fix build issue
// we can decide whether to add it back if there's perf concern
const int gridSize = (input_length + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( FastGeluKernel<BFloat16, blockSize>)
, dim3(gridSize), dim3(blockSize), 0, stream, A, B, C, input_length, bias_length, input, bias, output);
return CUDA_CALL(hipGetLastError());
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 409219d223d56d705cb2f62f8f2a29b1faabe47a.cu | /*
The implementation of this file is based on gelu plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications: Add (bias) before Gelu is merged into this op to get better performance.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/cuda_call.h"
#include "contrib_ops/cuda/bert/fast_gelu_impl.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// constants for approximating the normal cdf
constexpr float A = 0.5f;
constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI)
constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI)
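// Together these implement the tanh approximation of GELU:
//   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
// i.e. output = in * cdf with cdf = A + A * tanh(in * (C * in * in + B)),
// where `in` is the input with the optional bias already added.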
template <typename T, unsigned TPB>
__global__ void FastGeluKernel(const T a, const T b, const T c, int input_length, int bias_length,
const T* input, const T* bias, T* output) {
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < input_length) {
const T x = input[idx];
const T in = (bias == nullptr) ? x : (T)(x + bias[idx % bias_length]);
const T cdf = a + a * _Tanh(in * (c * in * in + b));
output[idx] = in * cdf;
}
}
template <unsigned TPB>
__global__ void FastGeluKernel2(const half2 a, const half2 b, const half2 c, int input_length, int bias_length,
const half2* input, const half2* bias, half2* output) {
const int idx = blockIdx.x * TPB + threadIdx.x;
if (idx < input_length) {
const half2 x = input[idx];
const half2 in = (bias == nullptr) ? x : (x + bias[idx % bias_length]);
const half2 cdf = a + a * _Tanh(in * (c * in * in + b));
output[idx] = in * cdf;
}
}
template <>
Status LaunchFastGeluKernel(const cudaDeviceProp& prop, cudaStream_t stream, int input_length, int bias_length,
const float* input, const float* bias, float* output, bool /*use_half2*/) {
constexpr int blockSize = 256;
const int gridSize = (input_length + blockSize - 1) / blockSize;
FastGeluKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>(A, B, C, input_length, bias_length,
input, bias, output);
return CUDA_CALL(cudaGetLastError());
}
template <>
Status LaunchFastGeluKernel(const cudaDeviceProp& prop, cudaStream_t stream, int input_length, int bias_length,
const half* input, const half* bias, half* output, bool use_half2) {
constexpr int blockSize = 256;
if (use_half2 && 0 == (bias_length & 1) && prop.major >= 7) {
const int n = input_length / 2;
const int gridSize = (n + blockSize - 1) / blockSize;
const half2 A2 = __floats2half2_rn(A, A);
const half2 B2 = __floats2half2_rn(B, B);
const half2 C2 = __floats2half2_rn(C, C);
const half2* input2 = reinterpret_cast<const half2*>(input);
const half2* bias2 = reinterpret_cast<const half2*>(bias);
half2* output2 = reinterpret_cast<half2*>(output);
FastGeluKernel2<blockSize><<<gridSize, blockSize, 0, stream>>>(A2, B2, C2, n, bias_length / 2,
input2, bias2, output2);
} else {
const int gridSize = (input_length + blockSize - 1) / blockSize;
FastGeluKernel<half, blockSize><<<gridSize, blockSize, 0, stream>>>(A, B, C, input_length, bias_length,
input, bias, output);
}
return CUDA_CALL(cudaGetLastError());
}
template <>
Status LaunchFastGeluKernel(const cudaDeviceProp& prop, cudaStream_t stream, int input_length, int bias_length,
const BFloat16* input, const BFloat16* bias, BFloat16* output, bool /*use_half2*/) {
constexpr int blockSize = 256;
// remove nv_bfloat162 implementation for now to fix build issue
// we can decide whether to add it back if there's perf concern
const int gridSize = (input_length + blockSize - 1) / blockSize;
FastGeluKernel<BFloat16, blockSize>
<<<gridSize, blockSize, 0, stream>>>(A, B, C, input_length, bias_length, input, bias, output);
return CUDA_CALL(cudaGetLastError());
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
7d8814aecff8686ebcbb92fe70b8b9a0b41ed971.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialReplicationPadding.cu"
#else
void THNN_(SpatialReplicationPadding_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int padL, int padR,
int padT, int padB) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 3 || numInputDims == 4), 2, input,
"non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s")
if (numInputDims == 4) {
numBatch = THCTensor_(size)(state, input, 0);
planeDim++;
dimh++;
dimw++;
}
int numPlanes = THCTensor_(size)(state, input, planeDim);
int inputH = THCTensor_(size)(state, input, dimh);
int inputW = THCTensor_(size)(state, input, dimw);
int outputH = inputH + padT + padB;
int outputW = inputW + padL + padR;
THArgCheck(outputW >= 1 || outputH >= 1 , 2,
"input (H: %d, W: %d)is too small."
" Calculated output H: %d W: %d",
inputH, inputW, outputH, outputW);
THCDeviceTensor<real, 4> devInput;
THCDeviceTensor<real, 4> devOutput;
if (numInputDims == 3) {
THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 3>(state, input).upcastOuter<4>();
devOutput = toDeviceTensor<real, 3>(state, output).upcastOuter<4>();
} else {
THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 4>(state, input);
devOutput = toDeviceTensor<real, 4>(state, output);
}
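  // One thread per output element within a plane; grid.y indexes the plane (channel)
  // and grid.z the batch entry.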
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( SpatialReplicationPadding_updateOutput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devInput, devOutput, padT, padB, padL, padR);
}
void THNN_(SpatialReplicationPadding_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int padL, int padR,
int padT, int padB) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3,
"output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numInputDims = THCTensor_(nDimension)(state, input);
if (numInputDims == 4) {
planeDim++;
dimh++;
dimw++;
}
int iheight = input->size[dimh];
int iwidth = input->size[dimw];
int oheight = iheight + padT + padB;
int owidth = iwidth + padL + padR;
THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THCTensor_(size)(state, gradOutput, dimw));
THArgCheck(oheight == THCTensor_(size)(state, gradOutput, dimh), 3,
"gradOutput height unexpected. Expected: %d, Got: %d",
oheight, THCTensor_(size)(state, gradOutput, dimh));
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 4> devGradInput;
THCDeviceTensor<real, 4> devGradOutput;
if (numInputDims == 3) {
devGradInput = toDeviceTensor<real, 3>(state, gradInput).upcastOuter<4>();
devGradOutput = toDeviceTensor<real, 3>(state, gradOutput).upcastOuter<4>();
} else {
devGradInput = toDeviceTensor<real, 4>(state, gradInput);
devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( SpatialReplicationPadding_updateGradInput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devGradInput, devGradOutput, padT, padB, padL, padR);
}
#endif
| 7d8814aecff8686ebcbb92fe70b8b9a0b41ed971.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialReplicationPadding.cu"
#else
void THNN_(SpatialReplicationPadding_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int padL, int padR,
int padT, int padB) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numBatch = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 3 || numInputDims == 4), 2, input,
"non-empty 3D or 4D (batch mode) tensor expected for input, but got: %s")
if (numInputDims == 4) {
numBatch = THCTensor_(size)(state, input, 0);
planeDim++;
dimh++;
dimw++;
}
int numPlanes = THCTensor_(size)(state, input, planeDim);
int inputH = THCTensor_(size)(state, input, dimh);
int inputW = THCTensor_(size)(state, input, dimw);
int outputH = inputH + padT + padB;
int outputW = inputW + padL + padR;
THArgCheck(outputW >= 1 || outputH >= 1 , 2,
"input (H: %d, W: %d)is too small."
" Calculated output H: %d W: %d",
inputH, inputW, outputH, outputW);
THCDeviceTensor<real, 4> devInput;
THCDeviceTensor<real, 4> devOutput;
if (numInputDims == 3) {
THCTensor_(resize3d)(state, output, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 3>(state, input).upcastOuter<4>();
devOutput = toDeviceTensor<real, 3>(state, output).upcastOuter<4>();
} else {
THCTensor_(resize4d)(state, output, numBatch, numPlanes, outputH, outputW);
devInput = toDeviceTensor<real, 4>(state, input);
devOutput = toDeviceTensor<real, 4>(state, output);
}
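  // One thread per output element within a plane; grid.y indexes the plane (channel)
  // and grid.z the batch entry.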
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
SpatialReplicationPadding_updateOutput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devInput, devOutput, padT, padB, padL, padR);
}
void THNN_(SpatialReplicationPadding_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int padL, int padR,
int padT, int padB) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3,
"output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int numInputDims = THCTensor_(nDimension)(state, input);
if (numInputDims == 4) {
planeDim++;
dimh++;
dimw++;
}
int iheight = input->size[dimh];
int iwidth = input->size[dimw];
int oheight = iheight + padT + padB;
int owidth = iwidth + padL + padR;
THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THCTensor_(size)(state, gradOutput, dimw));
THArgCheck(oheight == THCTensor_(size)(state, gradOutput, dimh), 3,
"gradOutput height unexpected. Expected: %d, Got: %d",
oheight, THCTensor_(size)(state, gradOutput, dimh));
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 4> devGradInput;
THCDeviceTensor<real, 4> devGradOutput;
if (numInputDims == 3) {
devGradInput = toDeviceTensor<real, 3>(state, gradInput).upcastOuter<4>();
devGradOutput = toDeviceTensor<real, 3>(state, gradOutput).upcastOuter<4>();
} else {
devGradInput = toDeviceTensor<real, 4>(state, gradInput);
devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
SpatialReplicationPadding_updateGradInput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devGradInput, devGradOutput, padT, padB, padL, padR);
}
#endif
|
d7d7d3800ddab91546b4bf71a0a6687844c524c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif // USE_GREENTEA
namespace caffe {
#ifdef USE_ROCM
template<typename Dtype>
__global__ void MaxPoolNDForward(const int n, const int num_axes,
const Dtype* bottom_data,
const int channels, const int* size,
const int* pooled_size, const int* kernel_size,
const int* ext_kernel_size, const int* stride,
const int* kstride, const int* pad,
Dtype* top_data, int* mask, Dtype* top_mask) {
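  // Decode the flat output index into per-axis pooled coordinates, derive the input
  // window [d_start, d_end) for every spatial axis, then walk the (possibly dilated)
  // window via d_iter, tracking the max value and its flat input offset.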
int d_idx[6]; // NOLINT(runtime/arrays)
int d_start[6]; // NOLINT(runtime/arrays)
int d_end[6]; // NOLINT(runtime/arrays)
int d_iter[6]; // NOLINT(runtime/arrays)
int i;
CUDA_KERNEL_LOOP(index, n) {
int offset = 1;
int num = index;
for (i = num_axes - 1; i >= 0; --i) {
d_idx[i] = index % pooled_size[i];
d_start[i] = d_idx[i] * stride[i] - pad[i];
d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);
d_start[i] = max(d_start[i], 0);
num /= pooled_size[i];
offset *= size[i];
d_iter[i] = d_start[i];
if (d_start[i] >= d_end[i]) {
top_data[index] = -FLT_MAX;
if (mask) {
mask[index] = -1;
} else {
top_mask[index] = -1;
}
return;
}
}
int chan = num % channels;
num /= channels;
offset *= (num * channels + chan);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
int final_offset = 0;
bool incremented;
do {
final_offset = offset;
int size_prod = 1;
for (i = num_axes - 1; i >= 0; --i) {
final_offset += d_iter[i] * size_prod;
size_prod *= size[i];
}
if (bottom_data[final_offset] > maxval) {
maxidx = final_offset;
maxval = bottom_data[maxidx];
}
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
if (d_iter[i] >= d_end[i] - kstride[i]) {
d_iter[i] = d_start[i];
} else {
d_iter[i] += kstride[i];
incremented = true;
break;
}
}
} while (incremented);
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
#endif // USE_ROCM
template<typename Dtype>
void PoolingNDLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolNDForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, num_spatial_axes_, bottom_data,
channels_, size_.gpu_data(), pooled_size_.gpu_data(),
kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(),
stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(),
top_data, mask, top_mask);
break;
default: {
LOG(FATAL)<< "Unknown pooling method.";
}
}
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX: {
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel(
CL_KERNEL_SELECT("max_pool_forward_nd"));
viennacl::ocl::enqueue(
oclk_max_pool_forward(count, num_spatial_axes_,
WrapHandle((cl_mem)bottom_data, &ctx),
channels_,
WrapHandle((cl_mem)(size_.gpu_data()), &ctx),
WrapHandle((cl_mem)(pooled_size_.gpu_data()), &ctx),
WrapHandle((cl_mem)(kernel_shape_.gpu_data()), &ctx),
WrapHandle((cl_mem)(ext_kernel_shape_.gpu_data()), &ctx),
WrapHandle((cl_mem)(stride_.gpu_data()), &ctx),
WrapHandle((cl_mem)(kstride_.gpu_data()), &ctx),
WrapHandle((cl_mem)(pad_.gpu_data()), &ctx),
WrapHandle((cl_mem)top_data, &ctx),
mask == NULL ? 0 : 1,
WrapHandle((cl_mem)mask, &ctx),
WrapHandle((cl_mem)top_mask, &ctx)),
ctx.get_queue());
}
break;
default: {
LOG(FATAL)<< "Unknown pooling method.";
}
}
#endif // USE_GREENTEA
}
}
#ifdef USE_ROCM
template<typename Dtype>
__global__ void MaxPoolNDBackward(const int n, const int num_axes,
const Dtype* top_diff, const int* mask,
const Dtype* top_mask,
const int channels, const int* size,
const int* pooled_size,
const int* kernel_size,
const int* ext_kernel_size, const int* stride,
const int* kstride, const int* pad,
Dtype* bottom_diff) {
int d_idx[6]; // NOLINT(runtime/arrays)
int d_start[6]; // NOLINT(runtime/arrays)
int d_end[6]; // NOLINT(runtime/arrays)
int d_iter[6]; // NOLINT(runtime/arrays)
int i;
CUDA_KERNEL_LOOP(index, n) {
// find out the local index
// find out the local offset
int offset = 1;
int num = index;
for (i = num_axes - 1; i >= 0; --i) {
d_idx[i] = num % size[i];
d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?
d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;
d_end[i] = (d_idx[i] >= pooled_size[i]) ?
(pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %
kstride[i] : d_idx[i];
num /= size[i];
offset *= pooled_size[i];
d_iter[i] = d_start[i];
if (d_start[i] > d_end[i]) {
bottom_diff[index] = 0;
return;
}
}
int chan = num % channels;
num /= channels;
offset *= (num * channels + chan);
Dtype gradient = 0;
int final_offset = 0;
int im_offset = 0;
bool incremented;
do {
final_offset = offset;
im_offset = 0;
int size_prod = 1;
int pooled_size_prod = 1;
for (i = num_axes - 1; i >= 0; --i) {
final_offset += d_iter[i] * pooled_size_prod;
im_offset += d_idx[i] * size_prod;
size_prod *= size[i];
pooled_size_prod *= pooled_size[i];
}
if (mask) {
if (mask[final_offset] == im_offset) {
gradient += top_diff[final_offset];
}
} else {
if (top_mask[final_offset] == im_offset) {
gradient += top_diff[final_offset];
}
}
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
if (d_iter[i] > d_end[i] - kstride[i]) {
d_iter[i] = d_start[i];
} else {
d_iter[i] += kstride[i];
incremented = true;
break;
}
}
} while (incremented);
bottom_diff[index] = gradient;
}
}
#endif // USE_ROCM
template<typename Dtype>
void PoolingNDLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_gpu_set(count, Dtype(0.), bottom_diff);
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolNDBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, num_spatial_axes_, top_diff, mask, top_mask,
channels_, size_.gpu_data(), pooled_size_.gpu_data(),
kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(),
stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(),
bottom_diff);
break;
default:
LOG(FATAL)<<
"Unknown or unsupported pooling method in Backward_gpu().";
}
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
greentea_gpu_set(this->device_context_->id(), count, Dtype(0.),
(cl_mem) bottom_diff, 0);
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX: {
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel(
CL_KERNEL_SELECT("max_pool_backward_nd"));
viennacl::ocl::enqueue(
oclk_max_pool_backward(
count, num_spatial_axes_, WrapHandle((cl_mem) top_diff, &ctx),
mask == NULL ? 0 : 1,
WrapHandle((cl_mem) mask, &ctx),
WrapHandle((cl_mem) top_mask, &ctx), channels_,
WrapHandle((cl_mem) (size_.gpu_data()), &ctx),
WrapHandle((cl_mem) (pooled_size_.gpu_data()), &ctx),
WrapHandle((cl_mem) (kernel_shape_.gpu_data()), &ctx),
WrapHandle((cl_mem) (ext_kernel_shape_.gpu_data()), &ctx),
WrapHandle((cl_mem) (stride_.gpu_data()), &ctx),
WrapHandle((cl_mem) (kstride_.gpu_data()), &ctx),
WrapHandle((cl_mem) (pad_.gpu_data()), &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
}
break;
default:
LOG(FATAL)<<
"Unknown or unsupported pooling method in Backward_gpu().";
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingNDLayer);
} // namespace caffe
| d7d7d3800ddab91546b4bf71a0a6687844c524c3.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif // USE_GREENTEA
namespace caffe {
#ifdef USE_CUDA
template<typename Dtype>
__global__ void MaxPoolNDForward(const int n, const int num_axes,
const Dtype* bottom_data,
const int channels, const int* size,
const int* pooled_size, const int* kernel_size,
const int* ext_kernel_size, const int* stride,
const int* kstride, const int* pad,
Dtype* top_data, int* mask, Dtype* top_mask) {
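  // Decode the flat output index into per-axis pooled coordinates, derive the input
  // window [d_start, d_end) for every spatial axis, then walk the (possibly dilated)
  // window via d_iter, tracking the max value and its flat input offset.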
int d_idx[6]; // NOLINT(runtime/arrays)
int d_start[6]; // NOLINT(runtime/arrays)
int d_end[6]; // NOLINT(runtime/arrays)
int d_iter[6]; // NOLINT(runtime/arrays)
int i;
CUDA_KERNEL_LOOP(index, n) {
int offset = 1;
int num = index;
for (i = num_axes - 1; i >= 0; --i) {
d_idx[i] = index % pooled_size[i];
d_start[i] = d_idx[i] * stride[i] - pad[i];
d_end[i] = min(d_start[i] + ext_kernel_size[i], size[i]);
d_start[i] = max(d_start[i], 0);
num /= pooled_size[i];
offset *= size[i];
d_iter[i] = d_start[i];
if (d_start[i] >= d_end[i]) {
top_data[index] = -FLT_MAX;
if (mask) {
mask[index] = -1;
} else {
top_mask[index] = -1;
}
return;
}
}
int chan = num % channels;
num /= channels;
offset *= (num * channels + chan);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
int final_offset = 0;
bool incremented;
do {
final_offset = offset;
int size_prod = 1;
for (i = num_axes - 1; i >= 0; --i) {
final_offset += d_iter[i] * size_prod;
size_prod *= size[i];
}
if (bottom_data[final_offset] > maxval) {
maxidx = final_offset;
maxval = bottom_data[maxidx];
}
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
if (d_iter[i] >= d_end[i] - kstride[i]) {
d_iter[i] = d_start[i];
} else {
d_iter[i] += kstride[i];
incremented = true;
break;
}
}
} while (incremented);
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
#endif // USE_CUDA
template<typename Dtype>
void PoolingNDLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolNDForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, num_spatial_axes_, bottom_data,
channels_, size_.gpu_data(), pooled_size_.gpu_data(),
kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(),
stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(),
top_data, mask, top_mask);
break;
default: {
LOG(FATAL)<< "Unknown pooling method.";
}
}
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX: {
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
viennacl::ocl::kernel &oclk_max_pool_forward = program.get_kernel(
CL_KERNEL_SELECT("max_pool_forward_nd"));
viennacl::ocl::enqueue(
oclk_max_pool_forward(count, num_spatial_axes_,
WrapHandle((cl_mem)bottom_data, &ctx),
channels_,
WrapHandle((cl_mem)(size_.gpu_data()), &ctx),
WrapHandle((cl_mem)(pooled_size_.gpu_data()), &ctx),
WrapHandle((cl_mem)(kernel_shape_.gpu_data()), &ctx),
WrapHandle((cl_mem)(ext_kernel_shape_.gpu_data()), &ctx),
WrapHandle((cl_mem)(stride_.gpu_data()), &ctx),
WrapHandle((cl_mem)(kstride_.gpu_data()), &ctx),
WrapHandle((cl_mem)(pad_.gpu_data()), &ctx),
WrapHandle((cl_mem)top_data, &ctx),
mask == NULL ? 0 : 1,
WrapHandle((cl_mem)mask, &ctx),
WrapHandle((cl_mem)top_mask, &ctx)),
ctx.get_queue());
}
break;
default: {
LOG(FATAL)<< "Unknown pooling method.";
}
}
#endif // USE_GREENTEA
}
}
#ifdef USE_CUDA
template<typename Dtype>
__global__ void MaxPoolNDBackward(const int n, const int num_axes,
const Dtype* top_diff, const int* mask,
const Dtype* top_mask,
const int channels, const int* size,
const int* pooled_size,
const int* kernel_size,
const int* ext_kernel_size, const int* stride,
const int* kstride, const int* pad,
Dtype* bottom_diff) {
int d_idx[6]; // NOLINT(runtime/arrays)
int d_start[6]; // NOLINT(runtime/arrays)
int d_end[6]; // NOLINT(runtime/arrays)
int d_iter[6]; // NOLINT(runtime/arrays)
int i;
CUDA_KERNEL_LOOP(index, n) {
// find out the local index
// find out the local offset
int offset = 1;
int num = index;
for (i = num_axes - 1; i >= 0; --i) {
d_idx[i] = num % size[i];
d_start[i] = (d_idx[i] < ext_kernel_size[i]) ?
d_idx[i] % kstride[i] : (d_idx[i] - ext_kernel_size[i]) + 1;
d_end[i] = (d_idx[i] >= pooled_size[i]) ?
(pooled_size[i] - 1) - (pooled_size[i] - 1 - d_start[i]) %
kstride[i] : d_idx[i];
num /= size[i];
offset *= pooled_size[i];
d_iter[i] = d_start[i];
if (d_start[i] > d_end[i]) {
bottom_diff[index] = 0;
return;
}
}
int chan = num % channels;
num /= channels;
offset *= (num * channels + chan);
Dtype gradient = 0;
int final_offset = 0;
int im_offset = 0;
bool incremented;
do {
final_offset = offset;
im_offset = 0;
int size_prod = 1;
int pooled_size_prod = 1;
for (i = num_axes - 1; i >= 0; --i) {
final_offset += d_iter[i] * pooled_size_prod;
im_offset += d_idx[i] * size_prod;
size_prod *= size[i];
pooled_size_prod *= pooled_size[i];
}
if (mask) {
if (mask[final_offset] == im_offset) {
gradient += top_diff[final_offset];
}
} else {
if (top_mask[final_offset] == im_offset) {
gradient += top_diff[final_offset];
}
}
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
if (d_iter[i] > d_end[i] - kstride[i]) {
d_iter[i] = d_start[i];
} else {
d_iter[i] += kstride[i];
incremented = true;
break;
}
}
} while (incremented);
bottom_diff[index] = gradient;
}
}
#endif // USE_CUDA
template<typename Dtype>
void PoolingNDLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_gpu_set(count, Dtype(0.), bottom_diff);
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolNDBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, num_spatial_axes_, top_diff, mask, top_mask,
channels_, size_.gpu_data(), pooled_size_.gpu_data(),
kernel_shape_.gpu_data(), ext_kernel_shape_.gpu_data(),
stride_.gpu_data(), kstride_.gpu_data(), pad_.gpu_data(),
bottom_diff);
break;
default:
LOG(FATAL)<<
"Unknown or unsupported pooling method in Backward_gpu().";
}
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
greentea_gpu_set(this->device_context_->id(), count, Dtype(0.),
(cl_mem) bottom_diff, 0);
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX: {
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
viennacl::ocl::kernel &oclk_max_pool_backward = program.get_kernel(
CL_KERNEL_SELECT("max_pool_backward_nd"));
viennacl::ocl::enqueue(
oclk_max_pool_backward(
count, num_spatial_axes_, WrapHandle((cl_mem) top_diff, &ctx),
mask == NULL ? 0 : 1,
WrapHandle((cl_mem) mask, &ctx),
WrapHandle((cl_mem) top_mask, &ctx), channels_,
WrapHandle((cl_mem) (size_.gpu_data()), &ctx),
WrapHandle((cl_mem) (pooled_size_.gpu_data()), &ctx),
WrapHandle((cl_mem) (kernel_shape_.gpu_data()), &ctx),
WrapHandle((cl_mem) (ext_kernel_shape_.gpu_data()), &ctx),
WrapHandle((cl_mem) (stride_.gpu_data()), &ctx),
WrapHandle((cl_mem) (kstride_.gpu_data()), &ctx),
WrapHandle((cl_mem) (pad_.gpu_data()), &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
}
break;
default:
LOG(FATAL)<<
"Unknown or unsupported pooling method in Backward_gpu().";
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingNDLayer);
} // namespace caffe
|
372931554e1ffeaf501327de16c508f164cd5814.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifndef _MSC_VER
const float kBNLL_THRESHOLD = 50.;
#endif
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
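// Numerically stable softplus log(1 + exp(x)): for x > 0 compute x + log(1 + exp(-x)) so exp() cannot overflow.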
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
#ifdef _MSC_VER
const float kBNLL_THRESHOLD = 50.;
#endif
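// Gradient of softplus is the logistic sigmoid exp(x) / (exp(x) + 1); the exponent is clamped at kBNLL_THRESHOLD to avoid overflow.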
Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
| 372931554e1ffeaf501327de16c508f164cd5814.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#ifndef _MSC_VER
const float kBNLL_THRESHOLD = 50.;
#endif
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
#ifdef _MSC_VER
const float kBNLL_THRESHOLD = 50.;
#endif
Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
|
6bbbf2524fa6875e9d5fd49dbe5e4563a456c4a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#include "core/providers/cuda/cu_inc/common.cuh"
#include "layer_norm_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
template <typename T>
__device__ __forceinline__ T WARP_SHFL(T value, int srcLane, int width, unsigned int mask = 0xffffffff)
{
#if TORCH_HIP_VERSION >= 9000
return __shfl_sync(mask, value, srcLane, width);
#else
return __shfl(value, srcLane, width);
#endif
}
template <typename T>
__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
{
#if TORCH_HIP_VERSION >= 9000
return __shfl_xor_sync(mask, value, laneMask, width);
#else
return __shfl_xor(value, laneMask, width);
#endif
}
template <typename U>
__device__ void cuWelfordOnlineSum(
const U curr,
U& mu,
U& sigma2,
U& count) {
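// Welford's online update: fold one sample into the running mean (mu) and the running sum of squared deviations (sigma2).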
count = count + U(1);
U delta = curr - mu;
U lmean = mu + delta / count;
mu = lmean;
U delta2 = curr - lmean;
sigma2 = sigma2 + delta * delta2;
}
template <typename U>
__device__ void cuChanOnlineSum(
const U muB,
const U sigma2B,
const U countB,
U& mu,
U& sigma2,
U& count,
const int& warp_size) {
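// Pairwise (Chan-style) merge of two partial (mean, M2, count) statistics, used to combine per-lane and per-warp results.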
U delta = muB - mu;
U nA = count;
U nB = countB;
count = count + countB;
U nX = count;
if (nX > U(0)) {
nA = nA / nX;
nB = nB / nX;
mu = nA * mu + nB * muB;
sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX;
} else {
mu = U(0);
sigma2 = U(0);
}
}
template <typename T, typename U>
__device__ void cuWelfordMuSigma2(
const T* __restrict__ vals,
const int n1,
const int n2,
const int i1,
U& mu,
U& sigma2,
U* buf,
const int warp_size) {
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
U count = U(0);
mu = U(0);
sigma2 = U(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const T* lvals = vals + i1 * n2;
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
U curr = static_cast<U>(lvals[l + k]);
cuWelfordOnlineSum<U>(curr, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
U curr = static_cast<U>(lvals[l]);
cuWelfordOnlineSum<U>(curr, mu, sigma2, count);
}
// intra-warp reductions
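// Each step merges in the statistics from lane (threadIdx.x + 2^l) mod 32, so after five steps every lane holds the whole warp's result.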
for (int l = 0; l <= 4; ++l) {
int srcLaneB = (threadIdx.x + (1 << l)) & 31;
U muB = WARP_SHFL(mu, srcLaneB, warp_size);
U countB = WARP_SHFL(count, srcLaneB, warp_size);
U sigma2B = WARP_SHFL(sigma2, srcLaneB, warp_size);
cuChanOnlineSum<U>(muB, sigma2B, countB, mu, sigma2, count, warp_size);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
U* ubuf = (U*)buf;
U* ibuf = (U*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
U muB = ubuf[2 * threadIdx.y];
U sigma2B = ubuf[2 * threadIdx.y + 1];
U countB = ibuf[threadIdx.y];
cuChanOnlineSum<U>(muB, sigma2B, countB, mu, sigma2, count, warp_size);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / U(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0, warp_size);
sigma2 = WARP_SHFL(sigma2 / U(n2), 0, warp_size);
}
}
}
template <>
__device__ void cuWelfordMuSigma2(
const half* __restrict__ vals,
const int n1,
const int n2,
const int i1,
float& mu,
float& sigma2,
float* buf,
const int warp_size) {
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
float count = 0.0f;
mu = float(0);
sigma2 = float(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const half* lvals = vals + i1 * n2;
int l = 8 * thrx;
if ((((size_t)lvals) & 3) != 0) {
// only 16-bit aligned (address % 4 != 0): fix up alignment so the __half2 loads below are 32-bit aligned
// first thread consumes first point
if (thrx == 0) {
float curr = static_cast<float>(lvals[0]);
cuWelfordOnlineSum(curr, mu, sigma2, count);
}
++l;
}
// at this point, lvals[l] are 32 bit aligned for all threads.
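// Consume 8 half values per thread per iteration, loading them as four aligned __half2 pairs and accumulating both halves of each pair.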
for (; l + 7 < n2; l += 8 * numx) {
for (int k = 0; k < 8; k += 2) {
float2 curr = __half22float2(*((__half2*)(lvals + l + k)));
cuWelfordOnlineSum(curr.x, mu, sigma2, count);
cuWelfordOnlineSum(curr.y, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
float curr = static_cast<float>(lvals[l]);
cuWelfordOnlineSum(curr, mu, sigma2, count);
}
// intra-warp reductions
for (int l = 0; l <= 4; ++l) {
int srcLaneB = (threadIdx.x + (1 << l)) & 31;
float muB = WARP_SHFL(mu, srcLaneB, warp_size);
float countB = WARP_SHFL(count, srcLaneB, warp_size);
float sigma2B = WARP_SHFL(sigma2, srcLaneB, warp_size);
cuChanOnlineSum(muB, sigma2B, countB, mu, sigma2, count, warp_size);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
float* ubuf = (float*)buf;
float* ibuf = (float*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
float muB = ubuf[2 * threadIdx.y];
float sigma2B = ubuf[2 * threadIdx.y + 1];
float countB = ibuf[threadIdx.y];
cuChanOnlineSum(muB, sigma2B, countB, mu, sigma2, count, warp_size);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / float(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0, warp_size);
sigma2 = WARP_SHFL(sigma2 / float(n2), 0, warp_size);
}
}
}
template <typename U>
__device__ U rsqrt(U v) {
return U(1) / sqrt(v);
}
template <>
__device__ float rsqrt(float v) {
return rsqrtf(v);
}
template <>
__device__ double rsqrt(double v) {
return rsqrt(v);
}
namespace {
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
// template <typename T>
// struct SharedMemory
// {
// // Ensure that we won't compile any un-specialized types
// __device__ T *getPointer()
// {
// extern __device__ void error(void);
// error();
// return NULL;
// }
// };
// https://github.com/NVIDIA/apex/issues/246
template <typename T>
struct SharedMemory;
template <>
struct SharedMemory<float> {
__device__ float* getPointer() {
extern __shared__ float s_float[];
return s_float;
}
};
template <>
struct SharedMemory<double> {
__device__ double* getPointer() {
extern __shared__ double s_double[];
return s_double;
}
};
} // namespace
template <typename T, typename U>
__global__ void cuApplyLayerNorm(
T* __restrict__ output_vals,
U* __restrict__ mean,
U* __restrict__ invvar,
const T* __restrict__ vals,
const int n1,
const int n2,
const U epsilon,
const T* __restrict__ gamma,
const T* __restrict__ beta,
int warp_size) {
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensors are contiguous
//
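// Each block processes one n1 row at a time (grid-stride over blockIdx.y); its threads cooperate along the n2 normalization axis.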
for (auto i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) {
SharedMemory<U> shared;
U* buf = shared.getPointer();
U mu, sigma2;
cuWelfordMuSigma2(vals, n1, n2, i1, mu, sigma2, buf, warp_size);
const T* lvals = vals + i1 * n2;
T* ovals = output_vals + i1 * n2;
U c_invvar = rsqrt(sigma2 + epsilon);
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
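// Normalize: out = gamma * (x - mean) * invvar + beta when affine parameters are supplied, otherwise just (x - mean) * invvar.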
if (gamma != NULL && beta != NULL) {
for (int i = thrx; i < n2; i += numx) {
U curr = static_cast<U>(lvals[i]);
ovals[i] = gamma[i] * static_cast<T>(c_invvar * (curr - mu)) + beta[i];
}
} else {
for (int i = thrx; i < n2; i += numx) {
U curr = static_cast<U>(lvals[i]);
ovals[i] = static_cast<T>(c_invvar * (curr - mu));
}
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (mean != nullptr) mean[i1] = mu;
if (invvar != nullptr) invvar[i1] = c_invvar;
}
}
}
template <typename T, typename U>
void HostApplyLayerNorm(
T* output,
U* mean,
U* invvar,
const T* input,
int64_t n1,
int64_t n2,
double epsilon,
const T* gamma,
const T* beta) {
const dim3 threads(32, 4, 1);
const hipDeviceProp_t& prop = DeviceProp::GetDeviceProps();
const uint64_t maxGridY = prop.maxGridSize[1];
const int warp_size = prop.warpSize;
// const uint64_t maxGridY = 32;
const dim3 blocks(1, ::min((uint64_t)n1, maxGridY), 1);
int nshared =
threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0;
hipLaunchKernelGGL(( cuApplyLayerNorm), dim3(blocks), dim3(threads), nshared, 0,
output,
mean,
invvar,
input,
n1, n2,
U(epsilon),
gamma, beta, warp_size);
}
#define LAYERNORM_LINEAR_IMPL(T, U) \
template void HostApplyLayerNorm(T* output, U* mean, U* invvar, const T* input, int64_t n1, int64_t n2, \
double epsilon, const T* gamma, const T* beta);
LAYERNORM_LINEAR_IMPL(float, float)
LAYERNORM_LINEAR_IMPL(half, float)
LAYERNORM_LINEAR_IMPL(double, float)
//LAYERNORM_LINEAR_IMPL(half, half)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 6bbbf2524fa6875e9d5fd49dbe5e4563a456c4a5.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#include "core/providers/cuda/cu_inc/common.cuh"
#include "layer_norm_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
template <typename T>
__device__ __forceinline__ T WARP_SHFL(T value, int srcLane, int width, unsigned int mask = 0xffffffff)
{
#if CUDA_VERSION >= 9000
return __shfl_sync(mask, value, srcLane, width);
#else
return __shfl(value, srcLane, width);
#endif
}
template <typename T>
__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff)
{
#if CUDA_VERSION >= 9000
return __shfl_xor_sync(mask, value, laneMask, width);
#else
return __shfl_xor(value, laneMask, width);
#endif
}
template <typename U>
__device__ void cuWelfordOnlineSum(
const U curr,
U& mu,
U& sigma2,
U& count) {
count = count + U(1);
U delta = curr - mu;
U lmean = mu + delta / count;
mu = lmean;
U delta2 = curr - lmean;
sigma2 = sigma2 + delta * delta2;
}
template <typename U>
__device__ void cuChanOnlineSum(
const U muB,
const U sigma2B,
const U countB,
U& mu,
U& sigma2,
U& count,
const int& warp_size) {
U delta = muB - mu;
U nA = count;
U nB = countB;
count = count + countB;
U nX = count;
if (nX > U(0)) {
nA = nA / nX;
nB = nB / nX;
mu = nA * mu + nB * muB;
sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX;
} else {
mu = U(0);
sigma2 = U(0);
}
}
template <typename T, typename U>
__device__ void cuWelfordMuSigma2(
const T* __restrict__ vals,
const int n1,
const int n2,
const int i1,
U& mu,
U& sigma2,
U* buf,
const int warp_size) {
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
U count = U(0);
mu = U(0);
sigma2 = U(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const T* lvals = vals + i1 * n2;
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
U curr = static_cast<U>(lvals[l + k]);
cuWelfordOnlineSum<U>(curr, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
U curr = static_cast<U>(lvals[l]);
cuWelfordOnlineSum<U>(curr, mu, sigma2, count);
}
// intra-warp reductions
for (int l = 0; l <= 4; ++l) {
int srcLaneB = (threadIdx.x + (1 << l)) & 31;
U muB = WARP_SHFL(mu, srcLaneB, warp_size);
U countB = WARP_SHFL(count, srcLaneB, warp_size);
U sigma2B = WARP_SHFL(sigma2, srcLaneB, warp_size);
cuChanOnlineSum<U>(muB, sigma2B, countB, mu, sigma2, count, warp_size);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
U* ubuf = (U*)buf;
U* ibuf = (U*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
U muB = ubuf[2 * threadIdx.y];
U sigma2B = ubuf[2 * threadIdx.y + 1];
U countB = ibuf[threadIdx.y];
cuChanOnlineSum<U>(muB, sigma2B, countB, mu, sigma2, count, warp_size);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / U(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0, warp_size);
sigma2 = WARP_SHFL(sigma2 / U(n2), 0, warp_size);
}
}
}
template <>
__device__ void cuWelfordMuSigma2(
const half* __restrict__ vals,
const int n1,
const int n2,
const int i1,
float& mu,
float& sigma2,
float* buf,
const int warp_size) {
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
float count = 0.0f;
mu = float(0);
sigma2 = float(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const half* lvals = vals + i1 * n2;
int l = 8 * thrx;
if ((((size_t)lvals) & 3) != 0) {
// only 16-bit aligned (address % 4 != 0): fix up alignment so the __half2 loads below are 32-bit aligned
// first thread consumes first point
if (thrx == 0) {
float curr = static_cast<float>(lvals[0]);
cuWelfordOnlineSum(curr, mu, sigma2, count);
}
++l;
}
// at this point, lvals[l] are 32 bit aligned for all threads.
for (; l + 7 < n2; l += 8 * numx) {
for (int k = 0; k < 8; k += 2) {
float2 curr = __half22float2(*((__half2*)(lvals + l + k)));
cuWelfordOnlineSum(curr.x, mu, sigma2, count);
cuWelfordOnlineSum(curr.y, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
float curr = static_cast<float>(lvals[l]);
cuWelfordOnlineSum(curr, mu, sigma2, count);
}
// intra-warp reductions
for (int l = 0; l <= 4; ++l) {
int srcLaneB = (threadIdx.x + (1 << l)) & 31;
float muB = WARP_SHFL(mu, srcLaneB, warp_size);
float countB = WARP_SHFL(count, srcLaneB, warp_size);
float sigma2B = WARP_SHFL(sigma2, srcLaneB, warp_size);
cuChanOnlineSum(muB, sigma2B, countB, mu, sigma2, count, warp_size);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
float* ubuf = (float*)buf;
float* ibuf = (float*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
float muB = ubuf[2 * threadIdx.y];
float sigma2B = ubuf[2 * threadIdx.y + 1];
float countB = ibuf[threadIdx.y];
cuChanOnlineSum(muB, sigma2B, countB, mu, sigma2, count, warp_size);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / float(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0, warp_size);
sigma2 = WARP_SHFL(sigma2 / float(n2), 0, warp_size);
}
}
}
template <typename U>
__device__ U rsqrt(U v) {
return U(1) / sqrt(v);
}
template <>
__device__ float rsqrt(float v) {
return rsqrtf(v);
}
template <>
__device__ double rsqrt(double v) {
return rsqrt(v);
}
namespace {
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
// template <typename T>
// struct SharedMemory
// {
// // Ensure that we won't compile any un-specialized types
// __device__ T *getPointer()
// {
// extern __device__ void error(void);
// error();
// return NULL;
// }
// };
// https://github.com/NVIDIA/apex/issues/246
template <typename T>
struct SharedMemory;
template <>
struct SharedMemory<float> {
__device__ float* getPointer() {
extern __shared__ float s_float[];
return s_float;
}
};
template <>
struct SharedMemory<double> {
__device__ double* getPointer() {
extern __shared__ double s_double[];
return s_double;
}
};
} // namespace
template <typename T, typename U>
__global__ void cuApplyLayerNorm(
T* __restrict__ output_vals,
U* __restrict__ mean,
U* __restrict__ invvar,
const T* __restrict__ vals,
const int n1,
const int n2,
const U epsilon,
const T* __restrict__ gamma,
const T* __restrict__ beta,
int warp_size) {
// Assumptions:
// 1) blockDim.x == warpSize
// 2) Tensors are contiguous
//
for (auto i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) {
SharedMemory<U> shared;
U* buf = shared.getPointer();
U mu, sigma2;
cuWelfordMuSigma2(vals, n1, n2, i1, mu, sigma2, buf, warp_size);
const T* lvals = vals + i1 * n2;
T* ovals = output_vals + i1 * n2;
U c_invvar = rsqrt(sigma2 + epsilon);
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
if (gamma != NULL && beta != NULL) {
for (int i = thrx; i < n2; i += numx) {
U curr = static_cast<U>(lvals[i]);
ovals[i] = gamma[i] * static_cast<T>(c_invvar * (curr - mu)) + beta[i];
}
} else {
for (int i = thrx; i < n2; i += numx) {
U curr = static_cast<U>(lvals[i]);
ovals[i] = static_cast<T>(c_invvar * (curr - mu));
}
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (mean != nullptr) mean[i1] = mu;
if (invvar != nullptr) invvar[i1] = c_invvar;
}
}
}
template <typename T, typename U>
void HostApplyLayerNorm(
T* output,
U* mean,
U* invvar,
const T* input,
int64_t n1,
int64_t n2,
double epsilon,
const T* gamma,
const T* beta) {
const dim3 threads(32, 4, 1);
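// 32 threads in x (the kernels assume blockDim.x == warpSize) and four warps stacked in y per block.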
const cudaDeviceProp& prop = DeviceProp::GetDeviceProps();
const uint64_t maxGridY = prop.maxGridSize[1];
const int warp_size = prop.warpSize;
// const uint64_t maxGridY = 32;
const dim3 blocks(1, std::min((uint64_t)n1, maxGridY), 1);
int nshared =
threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0;
cuApplyLayerNorm<<<blocks, threads, nshared, 0>>>(
output,
mean,
invvar,
input,
n1, n2,
U(epsilon),
gamma, beta, warp_size);
}
#define LAYERNORM_LINEAR_IMPL(T, U) \
template void HostApplyLayerNorm(T* output, U* mean, U* invvar, const T* input, int64_t n1, int64_t n2, \
double epsilon, const T* gamma, const T* beta);
LAYERNORM_LINEAR_IMPL(float, float)
LAYERNORM_LINEAR_IMPL(half, float)
LAYERNORM_LINEAR_IMPL(double, float)
//LAYERNORM_LINEAR_IMPL(half, half)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
4cb27c9fe272ee1ae448656422f64b0778fa8868.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#define N 32
// Kernel definition
__global__ void MatAdd(int A[N][N], int B[N][N], int C[N][N])
{
//compute this thread's global element indices: block offset plus thread offset
int indexi = blockIdx.x*blockDim.x + threadIdx.x;
int indexj = blockIdx.y*blockDim.y + threadIdx.y;
if (indexi < N && indexj < N)
C[indexi][indexj] = A[indexi][indexj] + B[indexi][indexj];
}
int main() {
int a_h[N][N], b_h[N][N], c_h[N][N];
int (*a_d)[N], (*b_d)[N], (*c_d)[N];
int size = N*N*sizeof(int);
//allocate the memory on the device
hipMalloc((void**)&a_d, size);
hipMalloc((void**)&b_d, size);
hipMalloc((void**)&c_d, size);
//assign values to the matrices
for(int i=0; i<N; i++)
for(int j=0; j<N; j++)
{
a_h[i][j] = j;
b_h[i][j] = i;
}
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%d ", a_h[i][j]);
}
printf("\n\n");
}
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%d ", b_h[i][j]);
}
printf("\n\n");
}
//copy the matrices to the device
hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice);
hipMemcpy(c_d, c_h, size, hipMemcpyHostToDevice);
// launch kernel on the device with N*N blocks of one thread each
int threadsPerBlock = 1;
dim3 numBlocks(N, N);
hipLaunchKernelGGL(( MatAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, a_d, b_d, c_d);
//copy results from the device to the host
hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost);
//print the results
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%d ", c_h[i][j]);
}
printf("\n");
}
//free the memory
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
return 0;
}
| 4cb27c9fe272ee1ae448656422f64b0778fa8868.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define N 32
// Kernel definition
__global__ void MatAdd(int A[N][N], int B[N][N], int C[N][N])
{
//compute this thread's global element indices: block offset plus thread offset
int indexi = blockIdx.x*blockDim.x + threadIdx.x;
int indexj = blockIdx.y*blockDim.y + threadIdx.y;
if (indexi < N && indexj < N)
C[indexi][indexj] = A[indexi][indexj] + B[indexi][indexj];
}
int main() {
int a_h[N][N], b_h[N][N], c_h[N][N];
int (*a_d)[N], (*b_d)[N], (*c_d)[N];
int size = N*N*sizeof(int);
//allocate the memory on the device
cudaMalloc((void**)&a_d, size);
cudaMalloc((void**)&b_d, size);
cudaMalloc((void**)&c_d, size);
//assign values to the matrices
for(int i=0; i<N; i++)
for(int j=0; j<N; j++)
{
a_h[i][j] = j;
b_h[i][j] = i;
}
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%d ", a_h[i][j]);
}
printf("\n\n");
}
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%d ", b_h[i][j]);
}
printf("\n\n");
}
//copy the matrices to the device
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice);
// launch kernel on the device with N*N blocks of one thread each
int threadsPerBlock = 1;
dim3 numBlocks(N, N);
MatAdd<<<numBlocks, threadsPerBlock>>>(a_d, b_d, c_d);
//copy results from the device to the host
cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
//print the results
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%d ", c_h[i][j]);
}
printf("\n");
}
//free the memory
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
return 0;
}
|
2ce2decf9801d1c20484e0b9044facd248c4808e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Using target data to promote data allocation to higher level, enabling reusing in iterations
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
#include "libxomp.h"
#include "xomp_cuda_lib_inlined.cu"
double time_stamp()
{
struct timeval t;
double time;
gettimeofday(&t,((struct timezone *)((void *)0)));
time = t . tv_sec + 1.0e-6 * t . tv_usec;
return time;
}
double time1;
double time2;
void driver();
void initialize();
void jacobi();
void error_check();
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successice over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define MSIZE 512
int n;
int m;
int mits;
#define REAL float // flexible between float and double
// depending on MSIZE!!
float error_ref = 9.212767E-04;
float resid_ref = 2.355429E-08;
float tol;
float relax = 1.0;
float alpha = 0.0543;
float u[512][512];
float f[512][512];
float uold[512][512];
float dx;
float dy;
int main()
{
// float toler;
/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
scanf ("%d",&n);
scanf ("%d",&m);
printf("Input tol - error tolerance for iterative solver\n");
scanf("%f",&toler);
tol=(double)toler;
printf("Input mits - Maximum iterations for solver\n");
scanf("%d",&mits);
*/
n = 512;
m = 512;
tol = 0.0000000001;
mits = 5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#endif
#endif
driver();
return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver()
{
initialize();
time1 = time_stamp();
/* Solve Helmholtz equation */
jacobi();
time2 = time_stamp();
printf("------------------------\n");
printf("Execution time = %f\n",time2 - time1);
/* error_check (n,m,alpha,dx,dy,u,f)*/
error_check();
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize()
{
int i;
int j;
int xx;
int yy;
//double PI=3.1415926;
dx = (2.0 / (n - 1));
dy = (2.0 / (m - 1));
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = ((int )(- 1.0 + (dx * (i - 1))));
yy = ((int )(- 1.0 + (dy * (j - 1))));
u[i][j] = 0.0;
f[i][j] = (- 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)));
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves Poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
__global__ void OUT__1__8713__(int n,int m,float omega,float ax,float ay,float b,float *_dev_per_block_error,float *_dev_u,float *_dev_f,float *_dev_uold)
{
int _p_i;
int _p_j;
float _p_error;
_p_error = 0;
float _p_resid;
int _dev_lower;
int _dev_upper;
int _dev_loop_chunk_size;
int _dev_loop_sched_index;
int _dev_loop_stride;
int _dev_thread_num = getCUDABlockThreadCount(1);
int _dev_thread_id = getLoopIndexFromCUDAVariables(1);
XOMP_static_sched_init(1,n - 1 - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride);
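// XOMP static schedule: each GPU thread repeatedly claims a contiguous chunk of rows in [1, n-2] until the iteration space is exhausted.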
while(XOMP_static_sched_next(&_dev_loop_sched_index,n - 1 - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper))
for (_p_i = _dev_lower; _p_i <= _dev_upper; _p_i += 1) {
for (_p_j = 1; _p_j < m - 1; _p_j++) {
_p_resid = (ax * (_dev_uold[(_p_i - 1) * 512 + _p_j] + _dev_uold[(_p_i + 1) * 512 + _p_j]) + ay * (_dev_uold[_p_i * 512 + (_p_j - 1)] + _dev_uold[_p_i * 512 + (_p_j + 1)]) + b * _dev_uold[_p_i * 512 + _p_j] - _dev_f[_p_i * 512 + _p_j]) / b;
_dev_u[_p_i * 512 + _p_j] = _dev_uold[_p_i * 512 + _p_j] - omega * _p_resid;
_p_error = _p_error + _p_resid * _p_resid;
}
}
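// Reduce each thread's partial sum of squared residuals to one value per block; the host combines the per-block values afterwards.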
xomp_inner_block_reduction_float(_p_error,_dev_per_block_error,6);
}
__global__ void OUT__2__8713__(int n,int m,float *_dev_u,float *_dev_uold)
{
int _p_i;
int _p_j;
int _dev_lower;
int _dev_upper;
int _dev_loop_chunk_size;
int _dev_loop_sched_index;
int _dev_loop_stride;
int _dev_thread_num = getCUDABlockThreadCount(1);
int _dev_thread_id = getLoopIndexFromCUDAVariables(1);
XOMP_static_sched_init(0,n - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride);
while(XOMP_static_sched_next(&_dev_loop_sched_index,n - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper))
for (_p_i = _dev_lower; _p_i <= _dev_upper; _p_i += 1) {
for (_p_j = 0; _p_j < m; _p_j++)
_dev_uold[_p_i * 512 + _p_j] = _dev_u[_p_i * 512 + _p_j];
}
}
void jacobi()
{
float omega;
int i;
int j;
int k;
float error;
float resid;
float ax;
float ay;
float b;
// double error_local;
// float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
// float te1,te2;
// float second;
omega = relax;
/*
* Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
error = (10.0 * tol);
k = 1;
/* Translated from #pragma omp target data ... */
{
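// Outer device data environment: u, f and uold stay mapped on the GPU for the whole iteration loop, so the nested
// target regions below reuse the mappings instead of re-copying every sweep (the 'target data' promotion noted at the top of this file).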
xomp_deviceDataEnvironmentEnter();
float *_dev_u;
int _dev_u_size = sizeof(float ) * n * m;
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)u),_dev_u_size,1,1)));
float *_dev_f;
int _dev_f_size = sizeof(float ) * n * m;
_dev_f = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)f),_dev_f_size,1,0)));
float *_dev_uold;
int _dev_uold_size = sizeof(float ) * n * m;
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)uold),_dev_uold_size,0,0)));
while(k <= mits && error > tol){
error = 0.0;
/* Copy new solution into old */
{
xomp_deviceDataEnvironmentEnter();
float *_dev_u;
int _dev_u_size = sizeof(float ) * n * m;
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)u),_dev_u_size,1,0)));
float *_dev_uold;
int _dev_uold_size = sizeof(float ) * n * m;
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)uold),_dev_uold_size,0,1)));
/* Launch CUDA kernel ... */
int _threads_per_block_ = xomp_get_maxThreadsPerBlock();
int _num_blocks_ = xomp_get_max1DBlock(n - 1 - 0 + 1);
hipLaunchKernelGGL(( OUT__2__8713__), dim3(_num_blocks_),dim3(_threads_per_block_), 0, 0, n,m,_dev_u,_dev_uold);
xomp_deviceDataEnvironmentExit();
}
{
xomp_deviceDataEnvironmentEnter();
float *_dev_u;
int _dev_u_size = sizeof(float ) * n * m;
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)u),_dev_u_size,0,1)));
float *_dev_f;
int _dev_f_size = sizeof(float ) * n * m;
_dev_f = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)f),_dev_f_size,1,0)));
float *_dev_uold;
int _dev_uold_size = sizeof(float ) * n * m;
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)uold),_dev_uold_size,1,0)));
/* Launch CUDA kernel ... */
int _threads_per_block_ = xomp_get_maxThreadsPerBlock();
int _num_blocks_ = xomp_get_max1DBlock(n - 1 - 1 - 1 + 1);
float *_dev_per_block_error = (float *)(xomp_deviceMalloc(_num_blocks_ * sizeof(float )));
hipLaunchKernelGGL(( OUT__1__8713__), dim3(_num_blocks_),dim3(_threads_per_block_),(_threads_per_block_ * sizeof(float )), 0, n,m,omega,ax,ay,b,_dev_per_block_error,_dev_u,_dev_f,_dev_uold);
error = xomp_beyond_block_reduction_float(_dev_per_block_error,_num_blocks_,6);
xomp_freeDevice(_dev_per_block_error);
xomp_deviceDataEnvironmentExit();
}
// }
/* omp end parallel */
/* Error check */
if (k % 500 == 0) {
printf("Finished %d iteration with error =%f\n",k,error);
}
error = (sqrt(error) / (n * m));
k = k + 1;
/* End iteration loop */
}
xomp_deviceDataEnvironmentExit();
}
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n",error);
printf("Residual_ref :%E\n",resid_ref);
printf("Diff ref=%E\n",(fabs((error - resid_ref))));
fabs((error - resid_ref)) < 1E-14?((void )0) : __assert_fail("fabs(error-resid_ref) < 1E-14","jacobi-ompacc-opt1.c",235,__PRETTY_FUNCTION__);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check()
{
int i;
int j;
float xx;
float yy;
float temp;
float error;
dx = (2.0 / (n - 1));
dy = (2.0 / (m - 1));
error = 0.0;
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = (- 1.0 + (dx * (i - 1)));
yy = (- 1.0 + (dy * (j - 1)));
temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy)));
error = error + temp * temp;
}
error = (sqrt(error) / (n * m));
printf("Solution Error :%E \n",error);
printf("Solution Error Ref :%E \n",error_ref);
printf("Diff ref=%E\n",(fabs((error - error_ref))));
fabs((error - error_ref)) < 1E-14?((void )0) : __assert_fail("fabs(error-error_ref) < 1E-14","jacobi-ompacc-opt1.c",267,__PRETTY_FUNCTION__);
}
| 2ce2decf9801d1c20484e0b9044facd248c4808e.cu | // Using target data to promote data allocation to higher level, enabling reusing in iterations
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
#include "libxomp.h"
#include "xomp_cuda_lib_inlined.cu"
double time_stamp()
{
struct timeval t;
double time;
gettimeofday(&t,((struct timezone *)((void *)0)));
time = t . tv_sec + 1.0e-6 * t . tv_usec;
return time;
}
double time1;
double time2;
void driver();
void initialize();
void jacobi();
void error_check();
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successive over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define MSIZE 512
int n;
int m;
int mits;
#define REAL float // flexible between float and double
// depending on MSIZE!!
float error_ref = 9.212767E-04;
float resid_ref = 2.355429E-08;
float tol;
float relax = 1.0;
float alpha = 0.0543;
float u[512][512];
float f[512][512];
float uold[512][512];
float dx;
float dy;
int main()
{
// float toler;
/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
scanf ("%d",&n);
scanf ("%d",&m);
printf("Input tol - error tolerance for iterative solver\n");
scanf("%f",&toler);
tol=(double)toler;
printf("Input mits - Maximum iterations for solver\n");
scanf("%d",&mits);
*/
n = 512;
m = 512;
tol = 0.0000000001;
mits = 5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#endif
#endif
driver();
return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver()
{
initialize();
time1 = time_stamp();
/* Solve Helmholtz equation */
jacobi();
time2 = time_stamp();
printf("------------------------\n");
printf("Execution time = %f\n",time2 - time1);
/* error_check (n,m,alpha,dx,dy,u,f)*/
error_check();
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
void initialize()
{
int i;
int j;
int xx;
int yy;
//double PI=3.1415926;
dx = (2.0 / (n - 1));
dy = (2.0 / (m - 1));
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = ((int )(- 1.0 + (dx * (i - 1))));
yy = ((int )(- 1.0 + (dy * (j - 1))));
u[i][j] = 0.0;
f[i][j] = (- 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)));
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves Poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlet boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
__global__ void OUT__1__8713__(int n,int m,float omega,float ax,float ay,float b,float *_dev_per_block_error,float *_dev_u,float *_dev_f,float *_dev_uold)
{
int _p_i;
int _p_j;
float _p_error;
_p_error = 0;
float _p_resid;
int _dev_lower;
int _dev_upper;
int _dev_loop_chunk_size;
int _dev_loop_sched_index;
int _dev_loop_stride;
int _dev_thread_num = getCUDABlockThreadCount(1);
int _dev_thread_id = getLoopIndexFromCUDAVariables(1);
XOMP_static_sched_init(1,n - 1 - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride);
while(XOMP_static_sched_next(&_dev_loop_sched_index,n - 1 - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper))
for (_p_i = _dev_lower; _p_i <= _dev_upper; _p_i += 1) {
for (_p_j = 1; _p_j < m - 1; _p_j++) {
_p_resid = (ax * (_dev_uold[(_p_i - 1) * 512 + _p_j] + _dev_uold[(_p_i + 1) * 512 + _p_j]) + ay * (_dev_uold[_p_i * 512 + (_p_j - 1)] + _dev_uold[_p_i * 512 + (_p_j + 1)]) + b * _dev_uold[_p_i * 512 + _p_j] - _dev_f[_p_i * 512 + _p_j]) / b;
_dev_u[_p_i * 512 + _p_j] = _dev_uold[_p_i * 512 + _p_j] - omega * _p_resid;
_p_error = _p_error + _p_resid * _p_resid;
}
}
xomp_inner_block_reduction_float(_p_error,_dev_per_block_error,6);
}
__global__ void OUT__2__8713__(int n,int m,float *_dev_u,float *_dev_uold)
{
int _p_i;
int _p_j;
int _dev_lower;
int _dev_upper;
int _dev_loop_chunk_size;
int _dev_loop_sched_index;
int _dev_loop_stride;
int _dev_thread_num = getCUDABlockThreadCount(1);
int _dev_thread_id = getLoopIndexFromCUDAVariables(1);
XOMP_static_sched_init(0,n - 1,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride);
while(XOMP_static_sched_next(&_dev_loop_sched_index,n - 1,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper))
for (_p_i = _dev_lower; _p_i <= _dev_upper; _p_i += 1) {
for (_p_j = 0; _p_j < m; _p_j++)
_dev_uold[_p_i * 512 + _p_j] = _dev_u[_p_i * 512 + _p_j];
}
}
void jacobi()
{
float omega;
int i;
int j;
int k;
float error;
float resid;
float ax;
float ay;
float b;
// double error_local;
// float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
// float te1,te2;
// float second;
omega = relax;
/*
* Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (- 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
error = (10.0 * tol);
k = 1;
/* Translated from #pragma omp target data ... */
{
xomp_deviceDataEnvironmentEnter();
float *_dev_u;
int _dev_u_size = sizeof(float ) * n * m;
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)u),_dev_u_size,1,1)));
float *_dev_f;
int _dev_f_size = sizeof(float ) * n * m;
_dev_f = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)f),_dev_f_size,1,0)));
float *_dev_uold;
int _dev_uold_size = sizeof(float ) * n * m;
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)uold),_dev_uold_size,0,0)));
while(k <= mits && error > tol){
error = 0.0;
/* Copy new solution into old */
{
xomp_deviceDataEnvironmentEnter();
float *_dev_u;
int _dev_u_size = sizeof(float ) * n * m;
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)u),_dev_u_size,1,0)));
float *_dev_uold;
int _dev_uold_size = sizeof(float ) * n * m;
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)uold),_dev_uold_size,0,1)));
/* Launch CUDA kernel ... */
int _threads_per_block_ = xomp_get_maxThreadsPerBlock();
int _num_blocks_ = xomp_get_max1DBlock(n - 1 - 0 + 1);
OUT__2__8713__<<<_num_blocks_,_threads_per_block_>>>(n,m,_dev_u,_dev_uold);
xomp_deviceDataEnvironmentExit();
}
{
xomp_deviceDataEnvironmentEnter();
float *_dev_u;
int _dev_u_size = sizeof(float ) * n * m;
_dev_u = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)u),_dev_u_size,0,1)));
float *_dev_f;
int _dev_f_size = sizeof(float ) * n * m;
_dev_f = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)f),_dev_f_size,1,0)));
float *_dev_uold;
int _dev_uold_size = sizeof(float ) * n * m;
_dev_uold = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(((void *)uold),_dev_uold_size,1,0)));
/* Launch CUDA kernel ... */
int _threads_per_block_ = xomp_get_maxThreadsPerBlock();
int _num_blocks_ = xomp_get_max1DBlock(n - 1 - 1 - 1 + 1);
float *_dev_per_block_error = (float *)(xomp_deviceMalloc(_num_blocks_ * sizeof(float )));
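// One partial error sum per thread block; combined on the host by xomp_beyond_block_reduction_float and freed right after.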
OUT__1__8713__<<<_num_blocks_,_threads_per_block_,(_threads_per_block_ * sizeof(float ))>>>(n,m,omega,ax,ay,b,_dev_per_block_error,_dev_u,_dev_f,_dev_uold);
error = xomp_beyond_block_reduction_float(_dev_per_block_error,_num_blocks_,6);
xomp_freeDevice(_dev_per_block_error);
xomp_deviceDataEnvironmentExit();
}
// }
/* omp end parallel */
/* Error check */
if (k % 500 == 0) {
printf("Finished %d iteration with error =%f\n",k,error);
}
error = (sqrt(error) / (n * m));
k = k + 1;
/* End iteration loop */
}
xomp_deviceDataEnvironmentExit();
}
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n",error);
printf("Residual_ref :%E\n",resid_ref);
printf("Diff ref=%E\n",(fabs((error - resid_ref))));
fabs((error - resid_ref)) < 1E-14?((void )0) : __assert_fail("fabs(error-resid_ref) < 1E-14","jacobi-ompacc-opt1.c",235,__PRETTY_FUNCTION__);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check()
{
int i;
int j;
float xx;
float yy;
float temp;
float error;
dx = (2.0 / (n - 1));
dy = (2.0 / (m - 1));
error = 0.0;
//#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
xx = (- 1.0 + (dx * (i - 1)));
yy = (- 1.0 + (dy * (j - 1)));
temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy)));
error = error + temp * temp;
}
error = (sqrt(error) / (n * m));
printf("Solution Error :%E \n",error);
printf("Solution Error Ref :%E \n",error_ref);
printf("Diff ref=%E\n",(fabs((error - error_ref))));
fabs((error - error_ref)) < 1E-14?((void )0) : __assert_fail("fabs(error-error_ref) < 1E-14","jacobi-ompacc-opt1.c",267,__PRETTY_FUNCTION__);
}
|
63e3aa06e52af70aa4617d73d6f342699ea417a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computeCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
hipMalloc(&Params, XSIZE*YSIZE);
const float *Ws = NULL;
hipMalloc(&Ws, XSIZE*YSIZE);
const float *mus = NULL;
hipMalloc(&mus, XSIZE*YSIZE);
const float *W = NULL;
hipMalloc(&W, XSIZE*YSIZE);
const float *mu = NULL;
hipMalloc(&mu, XSIZE*YSIZE);
const bool *iMatch = NULL;
hipMalloc(&iMatch, XSIZE*YSIZE);
const int *iC = NULL;
hipMalloc(&iC, XSIZE*YSIZE);
const int *Wh = NULL;
hipMalloc(&Wh, XSIZE*YSIZE);
float *cmax = NULL;
hipMalloc(&cmax, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
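// One untimed launch plus ten warm-up launches precede the timed loop of 1000 launches; the timed loop is not
// followed by a device synchronize, so the reported time largely reflects asynchronous launch overhead.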
hipFree(0);
hipLaunchKernelGGL(computeCost, dim3(gridBlock), dim3(threadBlock), 0, 0, Params, Ws, mus, W, mu, iMatch, iC, Wh, cmax);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(computeCost, dim3(gridBlock), dim3(threadBlock), 0, 0, Params, Ws, mus, W, mu, iMatch, iC, Wh, cmax);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(computeCost, dim3(gridBlock), dim3(threadBlock), 0, 0, Params, Ws, mus, W, mu, iMatch, iC, Wh, cmax);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 63e3aa06e52af70aa4617d73d6f342699ea417a8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computeCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
cudaMalloc(&Params, XSIZE*YSIZE);
const float *Ws = NULL;
cudaMalloc(&Ws, XSIZE*YSIZE);
const float *mus = NULL;
cudaMalloc(&mus, XSIZE*YSIZE);
const float *W = NULL;
cudaMalloc(&W, XSIZE*YSIZE);
const float *mu = NULL;
cudaMalloc(&mu, XSIZE*YSIZE);
const bool *iMatch = NULL;
cudaMalloc(&iMatch, XSIZE*YSIZE);
const int *iC = NULL;
cudaMalloc(&iC, XSIZE*YSIZE);
const int *Wh = NULL;
cudaMalloc(&Wh, XSIZE*YSIZE);
float *cmax = NULL;
cudaMalloc(&cmax, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
computeCost<<<gridBlock,threadBlock>>>(Params,Ws,mus,W,mu,iMatch,iC,Wh,cmax);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
computeCost<<<gridBlock,threadBlock>>>(Params,Ws,mus,W,mu,iMatch,iC,Wh,cmax);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
computeCost<<<gridBlock,threadBlock>>>(Params,Ws,mus,W,mu,iMatch,iC,Wh,cmax);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
68da458321e4a0224a191e00a494f6d7bf2a1aea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "test_utils.h"
Eigen::MatrixXf extractChannel(const renderer::OutputTensor& output, int channel)
{
return output.slice(channel).eval().toEigen();
}
__constant__ float TestConstantMemory[16];
__global__ void TestCopyConstantToMemory(dim3 virtual_size, float* output)
{
CUMAT_KERNEL_1D_LOOP(i, virtual_size)
{
output[i] = TestConstantMemory[i];
}
CUMAT_KERNEL_1D_LOOP_END
}
void testConstantMemory()
{
float data[16] = { 1,2,3,4,5,6,7,8,9,12,1 };
CUMAT_SAFE_CALL(hipMemcpyToSymbol(TestConstantMemory, data, sizeof(float) * 16));
cuMat::VectorXf out(16);
cuMat::Context& ctx = cuMat::Context::current();
const auto cfg = ctx.createLaunchConfig1D(16, TestCopyConstantToMemory);
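// 1D launch over the 16 elements; the kernel simply copies the __constant__ array back into global memory so the host can print it.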
hipLaunchKernelGGL(( TestCopyConstantToMemory)
, dim3(cfg.block_count), dim3(cfg.thread_per_block), 0, ctx.stream() ,
cfg.virtual_size, out.data());
CUMAT_CHECK_ERROR();
const auto outCpu = out.toEigen();
std::cout << outCpu << std::endl;
} | 68da458321e4a0224a191e00a494f6d7bf2a1aea.cu | #include "test_utils.h"
Eigen::MatrixXf extractChannel(const renderer::OutputTensor& output, int channel)
{
return output.slice(channel).eval().toEigen();
}
__constant__ float TestConstantMemory[16];
__global__ void TestCopyConstantToMemory(dim3 virtual_size, float* output)
{
CUMAT_KERNEL_1D_LOOP(i, virtual_size)
{
output[i] = TestConstantMemory[i];
}
CUMAT_KERNEL_1D_LOOP_END
}
void testConstantMemory()
{
float data[16] = { 1,2,3,4,5,6,7,8,9,12,1 };
CUMAT_SAFE_CALL(cudaMemcpyToSymbol(TestConstantMemory, data, sizeof(float) * 16));
cuMat::VectorXf out(16);
cuMat::Context& ctx = cuMat::Context::current();
const auto cfg = ctx.createLaunchConfig1D(16, TestCopyConstantToMemory);
TestCopyConstantToMemory
<<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> (
cfg.virtual_size, out.data());
CUMAT_CHECK_ERROR();
const auto outCpu = out.toEigen();
std::cout << outCpu << std::endl;
} |
50e7772105b1000225d5979c310655f614c0b009.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/utilities.cuh>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/detail/combine.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/logical.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
#include <algorithm>
namespace cudf {
namespace strings {
namespace detail {
std::unique_ptr<column> concatenate(table_view const& strings_columns,
string_scalar const& separator,
string_scalar const& narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto num_columns = strings_columns.num_columns();
CUDF_EXPECTS(num_columns > 0, "At least one column must be specified");
// check all columns are of type string
CUDF_EXPECTS(std::all_of(strings_columns.begin(),
strings_columns.end(),
[](auto c) { return c.type().id() == type_id::STRING; }),
"All columns must be of type string");
if (num_columns == 1) // single strings column returns a copy
return std::make_unique<column>(*(strings_columns.begin()), stream, mr);
auto strings_count = strings_columns.num_rows();
if (strings_count == 0) // empty begets empty
return detail::make_empty_strings_column(stream, mr);
CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(), separator.size());
auto d_narep = get_scalar_device_view(const_cast<string_scalar&>(narep));
// Create device views from the strings columns.
auto table = table_device_view::create(strings_columns, stream);
auto d_table = *table;
// create resulting null mask
auto valid_mask = cudf::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_table, d_narep] __device__(size_type idx) {
bool null_element = thrust::any_of(
thrust::seq, d_table.begin(), d_table.end(), [idx](auto col) { return col.is_null(idx); });
return (!null_element || d_narep.is_valid());
},
stream,
mr);
auto& null_mask = valid_mask.first;
auto const null_count = valid_mask.second;
// build offsets column by computing sizes of each string in the output
auto offsets_transformer = [d_table, d_separator, d_narep] __device__(size_type row_idx) {
// for this row (idx), iterate over each column and add up the bytes
bool null_element =
thrust::any_of(thrust::seq, d_table.begin(), d_table.end(), [row_idx](auto const& d_column) {
return d_column.is_null(row_idx);
});
if (null_element && !d_narep.is_valid()) return 0;
size_type bytes = thrust::transform_reduce(
thrust::seq,
d_table.begin(),
d_table.end(),
[row_idx, d_separator, d_narep] __device__(column_device_view const& d_column) {
return d_separator.size_bytes() + (d_column.is_null(row_idx)
? d_narep.size()
: d_column.element<string_view>(row_idx).size_bytes());
},
0,
thrust::plus<size_type>());
// separator goes only in between elements
if (bytes > 0) // if not null
bytes -= d_separator.size_bytes(); // remove the last separator
return bytes;
};
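// make_offsets_child_column scans the per-row byte counts produced above into an
// offsets column (strings_count + 1 entries); the last entry, read below, gives the
// total number of bytes to allocate for the chars column.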
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), offsets_transformer);
auto offsets_column = detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_results_offsets = offsets_column->view().data<int32_t>();
// create the chars column
size_type bytes = thrust::device_pointer_cast(d_results_offsets)[strings_count];
auto chars_column =
strings::detail::create_chars_child_column(strings_count, null_count, bytes, stream, mr);
// fill the chars column
auto d_results_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_table, num_columns, d_separator, d_narep, d_results_offsets, d_results_chars] __device__(
size_type idx) {
bool null_element = thrust::any_of(
thrust::seq, d_table.begin(), d_table.end(), [idx](column_device_view const& col) {
return col.is_null(idx);
});
if (null_element && !d_narep.is_valid())
return; // do not write to buffer at all if any column element for this row is null
size_type offset = d_results_offsets[idx];
char* d_buffer = d_results_chars + offset;
// write out each column's entry for this row
for (size_type col_idx = 0; col_idx < num_columns; ++col_idx) {
auto d_column = d_table.column(col_idx);
string_view d_str =
d_column.is_null(idx) ? d_narep.value() : d_column.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
// separator goes only in between elements
if (col_idx + 1 < num_columns) d_buffer = detail::copy_string(d_buffer, d_separator);
}
});
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto strings_count = strings.size();
if (strings_count == 0) return detail::make_empty_strings_column(stream, mr);
CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(), separator.size());
auto d_narep = get_scalar_device_view(const_cast<string_scalar&>(narep));
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create an offsets array for building the output memory layout
rmm::device_vector<size_type> output_offsets(strings_count + 1);
auto d_output_offsets = output_offsets.data().get();
// using inclusive-scan to compute last entry which is the total size
thrust::transform_inclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_output_offsets + 1,
[d_strings, d_separator, d_narep] __device__(size_type idx) {
size_type bytes = 0;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid()) return 0; // skip nulls
bytes += d_narep.size();
} else
bytes += d_strings.element<string_view>(idx).size_bytes();
if ((idx + 1) < d_strings.size()) bytes += d_separator.size_bytes();
return bytes;
},
thrust::plus<size_type>());
CUDA_TRY(hipMemsetAsync(d_output_offsets, 0, sizeof(size_type), stream.value()));
// total size is the last entry
size_type bytes = output_offsets.back();
// build offsets column (only 1 string so 2 offset entries)
auto offsets_column =
make_numeric_column(data_type{type_id::INT32}, 2, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
// set the first entry to 0 and the last entry to bytes
int32_t new_offsets[] = {0, static_cast<int32_t>(bytes)};
CUDA_TRY(hipMemcpyAsync(offsets_view.data<int32_t>(),
new_offsets,
sizeof(new_offsets),
hipMemcpyHostToDevice,
stream.value()));
// build null mask
// only one entry so it is either all valid or all null
size_type null_count = 0;
rmm::device_buffer null_mask{0, stream, mr}; // init to null null-mask
if (strings.null_count() == strings_count && !narep.is_valid()) {
null_mask = cudf::detail::create_null_mask(1, cudf::mask_state::ALL_NULL, stream, mr);
null_count = 1;
}
auto chars_column =
detail::create_chars_child_column(strings_count, null_count, bytes, stream, mr);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_separator, d_narep, d_output_offsets, d_chars] __device__(size_type idx) {
size_type offset = d_output_offsets[idx];
char* d_buffer = d_chars + offset;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid())
return; // do not write to buffer if element is null (including separator)
d_buffer = detail::copy_string(d_buffer, d_narep.value());
} else {
string_view d_str = d_strings.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
}
if ((idx + 1) < d_strings.size()) d_buffer = detail::copy_string(d_buffer, d_separator);
});
return make_strings_column(1,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
std::unique_ptr<column> concatenate(table_view const& strings_columns,
strings_column_view const& separators,
string_scalar const& separator_narep,
string_scalar const& col_narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto num_columns = strings_columns.num_columns();
CUDF_EXPECTS(num_columns > 0, "At least one column must be specified");
// Check if all columns are of type string
CUDF_EXPECTS(std::all_of(strings_columns.begin(),
strings_columns.end(),
[](auto c) { return c.type().id() == type_id::STRING; }),
"All columns must be of type string");
auto strings_count = strings_columns.num_rows();
CUDF_EXPECTS(strings_count == separators.size(),
"Separators column should be the same size as the strings columns");
if (strings_count == 0) // Empty begets empty
return detail::make_empty_strings_column(stream, mr);
// Invalid output column strings - null rows
string_view const invalid_str{nullptr, 0};
auto const separator_rep = get_scalar_device_view(const_cast<string_scalar&>(separator_narep));
auto const col_rep = get_scalar_device_view(const_cast<string_scalar&>(col_narep));
auto const separator_col_view_ptr = column_device_view::create(separators.parent(), stream);
auto const separator_col_view = *separator_col_view_ptr;
if (num_columns == 1) {
// Shallow copy of the resultant strings
rmm::device_vector<string_view> out_col_strings(strings_count);
// Device view of the only column in the table view
auto const col0_ptr = column_device_view::create(strings_columns.column(0), stream);
auto const col0 = *col0_ptr;
// Execute it on every element
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
out_col_strings.data().get(),
// Output depends on the separator
[col0, invalid_str, separator_col_view, separator_rep, col_rep] __device__(auto ridx) {
if (!separator_col_view.is_valid(ridx) && !separator_rep.is_valid()) return invalid_str;
if (col0.is_valid(ridx)) {
auto sv = col0.element<string_view>(ridx);
return sv.empty() ? string_view{} : sv;
} else if (col_rep.is_valid()) {
auto cv = col_rep.value();
return cv.empty() ? string_view{} : cv;
} else
return invalid_str;
});
return make_strings_column(out_col_strings, invalid_str, stream, mr);
}
// Create device views from the strings columns.
auto table = table_device_view::create(strings_columns, stream);
auto d_table = *table;
// Create resulting null mask
auto valid_mask = cudf::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_table, separator_col_view, separator_rep, col_rep] __device__(size_type ridx) {
if (!separator_col_view.is_valid(ridx) && !separator_rep.is_valid()) return false;
bool all_nulls =
thrust::all_of(thrust::seq, d_table.begin(), d_table.end(), [ridx](auto const& col) {
return col.is_null(ridx);
});
return all_nulls ? col_rep.is_valid() : true;
},
stream,
mr);
auto null_count = valid_mask.second;
// Build offsets column by computing sizes of each string in the output
auto offsets_transformer = [d_table, separator_col_view, separator_rep, col_rep] __device__(
size_type ridx) {
// If the separator value for the row is null and if there aren't global separator
// replacements, this row does not have any value - null row
if (!separator_col_view.is_valid(ridx) && !separator_rep.is_valid()) return 0;
// For this row (idx), iterate over each column and add up the bytes
bool all_nulls =
thrust::all_of(thrust::seq, d_table.begin(), d_table.end(), [ridx](auto const& d_column) {
return d_column.is_null(ridx);
});
// If all column values are null and there isn't a global column replacement value, this row
// is a null row
if (all_nulls && !col_rep.is_valid()) return 0;
// There is at least one non-null column value (it can still be empty though)
auto separator_str = separator_col_view.is_valid(ridx)
? separator_col_view.element<string_view>(ridx)
: separator_rep.value();
size_type bytes = thrust::transform_reduce(
thrust::seq,
d_table.begin(),
d_table.end(),
[ridx, separator_str, col_rep] __device__(column_device_view const& d_column) {
// If column is null and there isn't a valid column replacement, this isn't used in
// final string concatenate
if (d_column.is_null(ridx) && !col_rep.is_valid()) return 0;
return separator_str.size_bytes() + (d_column.is_null(ridx)
? col_rep.size()
: d_column.element<string_view>(ridx).size_bytes());
},
0,
thrust::plus<size_type>());
    // Null/empty separator and columns do not produce a non-empty string
if (bytes == 0) assert(separator_str.size_bytes() == 0);
// Separator goes only in between elements
return static_cast<int32_t>(bytes - separator_str.size_bytes());
};
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), offsets_transformer);
auto offsets_column = detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_results_offsets = offsets_column->view().data<int32_t>();
// Create the chars column
size_type bytes = thrust::device_pointer_cast(d_results_offsets)[strings_count];
auto chars_column =
strings::detail::create_chars_child_column(strings_count, null_count, bytes, stream, mr);
// Fill the chars column
auto d_results_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_table,
num_columns,
d_results_offsets,
d_results_chars,
separator_col_view,
separator_rep,
col_rep] __device__(size_type ridx) {
// If the separator for this row is null and if there isn't a valid separator
// to replace, do not write anything for this row
if (!separator_col_view.is_valid(ridx) && !separator_rep.is_valid()) return;
bool all_nulls = thrust::all_of(
thrust::seq, d_table.begin(), d_table.end(), [ridx](auto const& col) {
return col.is_null(ridx);
});
// If all column values are null and there isn't a valid column replacement,
// skip this row
if (all_nulls && !col_rep.is_valid()) return;
size_type offset = d_results_offsets[ridx];
char* d_buffer = d_results_chars + offset;
bool colval_written = false;
// There is at least one non-null column value (it can still be empty though)
auto separator_str = separator_col_view.is_valid(ridx)
? separator_col_view.element<string_view>(ridx)
: separator_rep.value();
// Write out each column's entry for this row
for (size_type col_idx = 0; col_idx < num_columns; ++col_idx) {
auto d_column = d_table.column(col_idx);
// If the column isn't valid and if there isn't a replacement for it, skip
// it
if (d_column.is_null(ridx) && !col_rep.is_valid()) continue;
// Separator goes only in between elements
if (colval_written)
d_buffer = detail::copy_string(d_buffer, separator_str);
string_view d_str = d_column.is_null(ridx)
? col_rep.value()
: d_column.element<string_view>(ridx);
d_buffer = detail::copy_string(d_buffer, d_str);
colval_written = true;
}
});
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
(null_count) ? std::move(valid_mask.first) : rmm::device_buffer{},
stream,
mr);
}
} // namespace detail
// APIs
std::unique_ptr<column> concatenate(table_view const& strings_columns,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(strings_columns, separator, narep, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::join_strings(strings, separator, narep, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> concatenate(table_view const& strings_columns,
strings_column_view const& separators,
string_scalar const& separator_narep,
string_scalar const& col_narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(
strings_columns, separators, separator_narep, col_narep, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
| 50e7772105b1000225d5979c310655f614c0b009.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/utilities.cuh>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/valid_if.cuh>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/strings/combine.hpp>
#include <cudf/strings/detail/combine.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/logical.h>
#include <thrust/transform_reduce.h>
#include <thrust/transform_scan.h>
#include <algorithm>
namespace cudf {
namespace strings {
namespace detail {
std::unique_ptr<column> concatenate(table_view const& strings_columns,
string_scalar const& separator,
string_scalar const& narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto num_columns = strings_columns.num_columns();
CUDF_EXPECTS(num_columns > 0, "At least one column must be specified");
// check all columns are of type string
CUDF_EXPECTS(std::all_of(strings_columns.begin(),
strings_columns.end(),
[](auto c) { return c.type().id() == type_id::STRING; }),
"All columns must be of type string");
if (num_columns == 1) // single strings column returns a copy
return std::make_unique<column>(*(strings_columns.begin()), stream, mr);
auto strings_count = strings_columns.num_rows();
if (strings_count == 0) // empty begets empty
return detail::make_empty_strings_column(stream, mr);
CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(), separator.size());
auto d_narep = get_scalar_device_view(const_cast<string_scalar&>(narep));
// Create device views from the strings columns.
auto table = table_device_view::create(strings_columns, stream);
auto d_table = *table;
// create resulting null mask
auto valid_mask = cudf::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_table, d_narep] __device__(size_type idx) {
bool null_element = thrust::any_of(
thrust::seq, d_table.begin(), d_table.end(), [idx](auto col) { return col.is_null(idx); });
return (!null_element || d_narep.is_valid());
},
stream,
mr);
auto& null_mask = valid_mask.first;
auto const null_count = valid_mask.second;
// build offsets column by computing sizes of each string in the output
auto offsets_transformer = [d_table, d_separator, d_narep] __device__(size_type row_idx) {
// for this row (idx), iterate over each column and add up the bytes
bool null_element =
thrust::any_of(thrust::seq, d_table.begin(), d_table.end(), [row_idx](auto const& d_column) {
return d_column.is_null(row_idx);
});
if (null_element && !d_narep.is_valid()) return 0;
size_type bytes = thrust::transform_reduce(
thrust::seq,
d_table.begin(),
d_table.end(),
[row_idx, d_separator, d_narep] __device__(column_device_view const& d_column) {
return d_separator.size_bytes() + (d_column.is_null(row_idx)
? d_narep.size()
: d_column.element<string_view>(row_idx).size_bytes());
},
0,
thrust::plus<size_type>());
// separator goes only in between elements
if (bytes > 0) // if not null
bytes -= d_separator.size_bytes(); // remove the last separator
return bytes;
};
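// make_offsets_child_column scans the per-row byte counts produced above into an
// offsets column (strings_count + 1 entries); the last entry, read below, gives the
// total number of bytes to allocate for the chars column.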
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), offsets_transformer);
auto offsets_column = detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_results_offsets = offsets_column->view().data<int32_t>();
// create the chars column
size_type bytes = thrust::device_pointer_cast(d_results_offsets)[strings_count];
auto chars_column =
strings::detail::create_chars_child_column(strings_count, null_count, bytes, stream, mr);
// fill the chars column
auto d_results_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_table, num_columns, d_separator, d_narep, d_results_offsets, d_results_chars] __device__(
size_type idx) {
bool null_element = thrust::any_of(
thrust::seq, d_table.begin(), d_table.end(), [idx](column_device_view const& col) {
return col.is_null(idx);
});
if (null_element && !d_narep.is_valid())
return; // do not write to buffer at all if any column element for this row is null
size_type offset = d_results_offsets[idx];
char* d_buffer = d_results_chars + offset;
// write out each column's entry for this row
for (size_type col_idx = 0; col_idx < num_columns; ++col_idx) {
auto d_column = d_table.column(col_idx);
string_view d_str =
d_column.is_null(idx) ? d_narep.value() : d_column.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
// separator goes only in between elements
if (col_idx + 1 < num_columns) d_buffer = detail::copy_string(d_buffer, d_separator);
}
});
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto strings_count = strings.size();
if (strings_count == 0) return detail::make_empty_strings_column(stream, mr);
CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be a valid string_scalar");
string_view d_separator(separator.data(), separator.size());
auto d_narep = get_scalar_device_view(const_cast<string_scalar&>(narep));
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create an offsets array for building the output memory layout
rmm::device_vector<size_type> output_offsets(strings_count + 1);
auto d_output_offsets = output_offsets.data().get();
// using inclusive-scan to compute last entry which is the total size
thrust::transform_inclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_output_offsets + 1,
[d_strings, d_separator, d_narep] __device__(size_type idx) {
size_type bytes = 0;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid()) return 0; // skip nulls
bytes += d_narep.size();
} else
bytes += d_strings.element<string_view>(idx).size_bytes();
if ((idx + 1) < d_strings.size()) bytes += d_separator.size_bytes();
return bytes;
},
thrust::plus<size_type>());
CUDA_TRY(cudaMemsetAsync(d_output_offsets, 0, sizeof(size_type), stream.value()));
// total size is the last entry
size_type bytes = output_offsets.back();
// build offsets column (only 1 string so 2 offset entries)
auto offsets_column =
make_numeric_column(data_type{type_id::INT32}, 2, mask_state::UNALLOCATED, stream, mr);
auto offsets_view = offsets_column->mutable_view();
// set the first entry to 0 and the last entry to bytes
int32_t new_offsets[] = {0, static_cast<int32_t>(bytes)};
CUDA_TRY(cudaMemcpyAsync(offsets_view.data<int32_t>(),
new_offsets,
sizeof(new_offsets),
cudaMemcpyHostToDevice,
stream.value()));
// build null mask
// only one entry so it is either all valid or all null
size_type null_count = 0;
rmm::device_buffer null_mask{0, stream, mr}; // init to null null-mask
if (strings.null_count() == strings_count && !narep.is_valid()) {
null_mask = cudf::detail::create_null_mask(1, cudf::mask_state::ALL_NULL, stream, mr);
null_count = 1;
}
auto chars_column =
detail::create_chars_child_column(strings_count, null_count, bytes, stream, mr);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_strings, d_separator, d_narep, d_output_offsets, d_chars] __device__(size_type idx) {
size_type offset = d_output_offsets[idx];
char* d_buffer = d_chars + offset;
if (d_strings.is_null(idx)) {
if (!d_narep.is_valid())
return; // do not write to buffer if element is null (including separator)
d_buffer = detail::copy_string(d_buffer, d_narep.value());
} else {
string_view d_str = d_strings.element<string_view>(idx);
d_buffer = detail::copy_string(d_buffer, d_str);
}
if ((idx + 1) < d_strings.size()) d_buffer = detail::copy_string(d_buffer, d_separator);
});
return make_strings_column(1,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
std::unique_ptr<column> concatenate(table_view const& strings_columns,
strings_column_view const& separators,
string_scalar const& separator_narep,
string_scalar const& col_narep,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto num_columns = strings_columns.num_columns();
CUDF_EXPECTS(num_columns > 0, "At least one column must be specified");
// Check if all columns are of type string
CUDF_EXPECTS(std::all_of(strings_columns.begin(),
strings_columns.end(),
[](auto c) { return c.type().id() == type_id::STRING; }),
"All columns must be of type string");
auto strings_count = strings_columns.num_rows();
CUDF_EXPECTS(strings_count == separators.size(),
"Separators column should be the same size as the strings columns");
if (strings_count == 0) // Empty begets empty
return detail::make_empty_strings_column(stream, mr);
// Invalid output column strings - null rows
string_view const invalid_str{nullptr, 0};
auto const separator_rep = get_scalar_device_view(const_cast<string_scalar&>(separator_narep));
auto const col_rep = get_scalar_device_view(const_cast<string_scalar&>(col_narep));
auto const separator_col_view_ptr = column_device_view::create(separators.parent(), stream);
auto const separator_col_view = *separator_col_view_ptr;
if (num_columns == 1) {
// Shallow copy of the resultant strings
rmm::device_vector<string_view> out_col_strings(strings_count);
// Device view of the only column in the table view
auto const col0_ptr = column_device_view::create(strings_columns.column(0), stream);
auto const col0 = *col0_ptr;
// Execute it on every element
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
out_col_strings.data().get(),
// Output depends on the separator
[col0, invalid_str, separator_col_view, separator_rep, col_rep] __device__(auto ridx) {
if (!separator_col_view.is_valid(ridx) && !separator_rep.is_valid()) return invalid_str;
if (col0.is_valid(ridx)) {
auto sv = col0.element<string_view>(ridx);
return sv.empty() ? string_view{} : sv;
} else if (col_rep.is_valid()) {
auto cv = col_rep.value();
return cv.empty() ? string_view{} : cv;
} else
return invalid_str;
});
return make_strings_column(out_col_strings, invalid_str, stream, mr);
}
// Create device views from the strings columns.
auto table = table_device_view::create(strings_columns, stream);
auto d_table = *table;
// Create resulting null mask
auto valid_mask = cudf::detail::valid_if(
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
[d_table, separator_col_view, separator_rep, col_rep] __device__(size_type ridx) {
if (!separator_col_view.is_valid(ridx) && !separator_rep.is_valid()) return false;
bool all_nulls =
thrust::all_of(thrust::seq, d_table.begin(), d_table.end(), [ridx](auto const& col) {
return col.is_null(ridx);
});
return all_nulls ? col_rep.is_valid() : true;
},
stream,
mr);
auto null_count = valid_mask.second;
// Build offsets column by computing sizes of each string in the output
auto offsets_transformer = [d_table, separator_col_view, separator_rep, col_rep] __device__(
size_type ridx) {
// If the separator value for the row is null and if there aren't global separator
// replacements, this row does not have any value - null row
if (!separator_col_view.is_valid(ridx) && !separator_rep.is_valid()) return 0;
// For this row (idx), iterate over each column and add up the bytes
bool all_nulls =
thrust::all_of(thrust::seq, d_table.begin(), d_table.end(), [ridx](auto const& d_column) {
return d_column.is_null(ridx);
});
// If all column values are null and there isn't a global column replacement value, this row
// is a null row
if (all_nulls && !col_rep.is_valid()) return 0;
// There is at least one non-null column value (it can still be empty though)
auto separator_str = separator_col_view.is_valid(ridx)
? separator_col_view.element<string_view>(ridx)
: separator_rep.value();
size_type bytes = thrust::transform_reduce(
thrust::seq,
d_table.begin(),
d_table.end(),
[ridx, separator_str, col_rep] __device__(column_device_view const& d_column) {
// If column is null and there isn't a valid column replacement, this isn't used in
// final string concatenate
if (d_column.is_null(ridx) && !col_rep.is_valid()) return 0;
return separator_str.size_bytes() + (d_column.is_null(ridx)
? col_rep.size()
: d_column.element<string_view>(ridx).size_bytes());
},
0,
thrust::plus<size_type>());
    // Null/empty separator and columns do not produce a non-empty string
if (bytes == 0) assert(separator_str.size_bytes() == 0);
// Separator goes only in between elements
return static_cast<int32_t>(bytes - separator_str.size_bytes());
};
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), offsets_transformer);
auto offsets_column = detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr);
auto d_results_offsets = offsets_column->view().data<int32_t>();
// Create the chars column
size_type bytes = thrust::device_pointer_cast(d_results_offsets)[strings_count];
auto chars_column =
strings::detail::create_chars_child_column(strings_count, null_count, bytes, stream, mr);
// Fill the chars column
auto d_results_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
[d_table,
num_columns,
d_results_offsets,
d_results_chars,
separator_col_view,
separator_rep,
col_rep] __device__(size_type ridx) {
// If the separator for this row is null and if there isn't a valid separator
// to replace, do not write anything for this row
if (!separator_col_view.is_valid(ridx) && !separator_rep.is_valid()) return;
bool all_nulls = thrust::all_of(
thrust::seq, d_table.begin(), d_table.end(), [ridx](auto const& col) {
return col.is_null(ridx);
});
// If all column values are null and there isn't a valid column replacement,
// skip this row
if (all_nulls && !col_rep.is_valid()) return;
size_type offset = d_results_offsets[ridx];
char* d_buffer = d_results_chars + offset;
bool colval_written = false;
// There is at least one non-null column value (it can still be empty though)
auto separator_str = separator_col_view.is_valid(ridx)
? separator_col_view.element<string_view>(ridx)
: separator_rep.value();
// Write out each column's entry for this row
for (size_type col_idx = 0; col_idx < num_columns; ++col_idx) {
auto d_column = d_table.column(col_idx);
// If the column isn't valid and if there isn't a replacement for it, skip
// it
if (d_column.is_null(ridx) && !col_rep.is_valid()) continue;
// Separator goes only in between elements
if (colval_written)
d_buffer = detail::copy_string(d_buffer, separator_str);
string_view d_str = d_column.is_null(ridx)
? col_rep.value()
: d_column.element<string_view>(ridx);
d_buffer = detail::copy_string(d_buffer, d_str);
colval_written = true;
}
});
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
(null_count) ? std::move(valid_mask.first) : rmm::device_buffer{},
stream,
mr);
}
} // namespace detail
// APIs
std::unique_ptr<column> concatenate(table_view const& strings_columns,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(strings_columns, separator, narep, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> join_strings(strings_column_view const& strings,
string_scalar const& separator,
string_scalar const& narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::join_strings(strings, separator, narep, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> concatenate(table_view const& strings_columns,
strings_column_view const& separators,
string_scalar const& separator_narep,
string_scalar const& col_narep,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(
strings_columns, separators, separator_narep, col_narep, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
5a4d7d148e56929538262d308c357f662fb4c86d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
// CUDA Runtime API
#define TILE_WIDTH 32
void MatrixMultiplyOnHost(float* M, float* N, float* P, int width)
{
for(int i=0; i<width; ++i)
{
for (int j=0; j<width; ++j)
{
float sum = 0;
for(int k=0; k<width; ++k)
{
float a = M[i*width+k];
float b = N[k*width+j];
sum += a*b;
}
P[i*width+j] = sum;
}
}
}
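// Tiled matrix multiply: each block computes one TILE_WIDTH x TILE_WIDTH tile of P,
// looping over width/tile_width phases and staging the matching tiles of M and N in
// shared memory before accumulating the partial dot products.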
__global__ void MatrixMultiplyKernel(const float* devM, const float* devN, float* devP, const int width, const int tile_width)
{
__shared__ float sM[TILE_WIDTH][TILE_WIDTH];
__shared__ float sN[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int col = bx*tile_width+tx;
int row = by*tile_width+ty;
//Initialize accumulator to 0
float pValue = 0;
//Multiply and add
for(int m=0; m<width/tile_width;m++)
{
sM[ty][tx] = devM[row*width+(m*tile_width+tx)];
sN[ty][tx] = devN[col+(m*tile_width+ty)*width];
//each thread brings one element of devM and one element of devN into shared memory
//threads are blocked until the whole tile has been loaded
__syncthreads();
for(int k=0; k<tile_width; k++)
pValue += sM[ty][k]*sN[k][tx];
__syncthreads();
}
//Write value to device memory - each thread has unique index to write to
devP[row*width+col] = pValue;
}
void MatrixMultiplyOnDevice(const float* hostM, const float* hostN, float* hostP, const int width, const int tile_width)
{
int sizeInBytes = width*width*sizeof(float);
float *devM, *devN, *devP;
//Allocate M and N on device
hipMalloc((void**)&devM, sizeInBytes);
hipMalloc((void**)&devN, sizeInBytes);
//Allocate P
hipMalloc((void**)&devP, sizeInBytes);
//Copy M and N from host to device
hipMemcpy(devM, hostM, sizeInBytes, hipMemcpyHostToDevice);
hipMemcpy(devN, hostN, sizeInBytes, hipMemcpyHostToDevice);
//Setup thread/block execution configuration
dim3 dimBlocks(tile_width,tile_width); //Each block has (tile_width, tile_width) threads
dim3 dimGrid(width/tile_width,width/tile_width); //Launch a (width/tile_width) x (width/tile_width) grid of blocks
//Launch the kernel
clock_t begin = clock();
hipLaunchKernelGGL(( MatrixMultiplyKernel), dim3(dimGrid),dim3(dimBlocks), 0, 0, devM,devN,devP,width,tile_width);
clock_t end = clock();
float elapsed_secs = float(end - begin) / CLOCKS_PER_SEC;
printf("Matrix Multiply on Device: %fs\n",elapsed_secs);
//Copy P matrix from device to host
hipMemcpy(hostP, devP, sizeInBytes, hipMemcpyDeviceToHost);
//Free allocated memory
hipFree(devM); hipFree(devN); hipFree(devP);
}
void PrintMatrix(float* M, int width)
{
for(int i=0; i<width; i++)
{
for(int j=0; j<width; j++)
{
printf("%f ",M[i*width+j]);
}
printf("\n");
}
}
int main()
{
int width = 1024;
int tile_width = 32;
int size = width*width;
float* M = new float[size];
float* N = new float[size];
float* P = new float[size];
float* Q = new float[size];
srand (time(NULL));
for(int i=0; i<size; i++)
{
M[i] = rand() / (RAND_MAX + 1.);
N[i] = rand() / (RAND_MAX + 1.);
}
//multiply on host
clock_t begin = clock();
MatrixMultiplyOnHost(M,N,P,width);
clock_t end = clock();
float elapsed_secs = float(end - begin) / CLOCKS_PER_SEC;
printf("Matrix Multiply on Host: %fs\n",elapsed_secs);
//std::cout << "Matrix Multiply on Host: " << elapsed_secs << std::endl;
//multiply on device
//1. Copy M,N matrices to device
//2. M*N on device
//3. Copy P matrix to host and output
MatrixMultiplyOnDevice(M,N,Q,width,tile_width);
float avg_err = 0;
for(int i=0; i<size; i++)
avg_err += fabs(P[i]-Q[i]);
avg_err /= size;
printf("Average error is: %f\n",avg_err);
//PrintMatrix(M,width);
//PrintMatrix(N,width);
//PrintMatrix(P,width);
//PrintMatrix(Q,width);
return 0;
} | 5a4d7d148e56929538262d308c357f662fb4c86d.cu | #include <stdio.h>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
// CUDA Runtime API
#define TILE_WIDTH 32
void MatrixMultiplyOnHost(float* M, float* N, float* P, int width)
{
for(int i=0; i<width; ++i)
{
for (int j=0; j<width; ++j)
{
float sum = 0;
for(int k=0; k<width; ++k)
{
float a = M[i*width+k];
float b = N[k*width+j];
sum += a*b;
}
P[i*width+j] = sum;
}
}
}
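// Tiled matrix multiply: each block computes one TILE_WIDTH x TILE_WIDTH tile of P,
// looping over width/tile_width phases and staging the matching tiles of M and N in
// shared memory before accumulating the partial dot products.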
__global__ void MatrixMultiplyKernel(const float* devM, const float* devN, float* devP, const int width, const int tile_width)
{
__shared__ float sM[TILE_WIDTH][TILE_WIDTH];
__shared__ float sN[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int col = bx*tile_width+tx;
int row = by*tile_width+ty;
//Initialize accumulator to 0
float pValue = 0;
//Multiply and add
for(int m=0; m<width/tile_width;m++)
{
sM[ty][tx] = devM[row*width+(m*tile_width+tx)];
sN[ty][tx] = devN[col+(m*tile_width+ty)*width];
//each thread brings one element of devM and one element of devN into shared memory
//threads are blocked until the whole tile has been loaded
__syncthreads();
for(int k=0; k<tile_width; k++)
pValue += sM[ty][k]*sN[k][tx];
__syncthreads();
}
//Write value to device memory - each thread has unique index to write to
devP[row*width+col] = pValue;
}
void MatrixMultiplyOnDevice(const float* hostM, const float* hostN, float* hostP, const int width, const int tile_width)
{
int sizeInBytes = width*width*sizeof(float);
float *devM, *devN, *devP;
//Allocate M and N on device
cudaMalloc((void**)&devM, sizeInBytes);
cudaMalloc((void**)&devN, sizeInBytes);
//Allocate P
cudaMalloc((void**)&devP, sizeInBytes);
//Copy M and N from host to device
cudaMemcpy(devM, hostM, sizeInBytes, cudaMemcpyHostToDevice);
cudaMemcpy(devN, hostN, sizeInBytes, cudaMemcpyHostToDevice);
//Setup thread/block execution configuration
dim3 dimBlocks(tile_width,tile_width); //Each block has (tile_width, tile_width) threads
dim3 dimGrid(width/tile_width,width/tile_width); //Launch a (width/tile_width) x (width/tile_width) grid of blocks
//Launch the kernel
clock_t begin = clock();
MatrixMultiplyKernel<<<dimGrid,dimBlocks>>>(devM,devN,devP,width,tile_width);
clock_t end = clock();
float elapsed_secs = float(end - begin) / CLOCKS_PER_SEC;
printf("Matrix Multiply on Device: %fs\n",elapsed_secs);
//Copy P matrix from device to host
cudaMemcpy(hostP, devP, sizeInBytes, cudaMemcpyDeviceToHost);
//Free allocated memory
cudaFree(devM); cudaFree(devN); cudaFree(devP);
}
void PrintMatrix(float* M, int width)
{
for(int i=0; i<width; i++)
{
for(int j=0; j<width; j++)
{
printf("%f ",M[i*width+j]);
}
printf("\n");
}
}
int main()
{
int width = 1024;
int tile_width = 32;
int size = width*width;
float* M = new float[size];
float* N = new float[size];
float* P = new float[size];
float* Q = new float[size];
srand (time(NULL));
for(int i=0; i<size; i++)
{
M[i] = rand() / (RAND_MAX + 1.);
N[i] = rand() / (RAND_MAX + 1.);
}
//multiply on host
clock_t begin = clock();
MatrixMultiplyOnHost(M,N,P,width);
clock_t end = clock();
float elapsed_secs = float(end - begin) / CLOCKS_PER_SEC;
printf("Matrix Multiply on Host: %fs\n",elapsed_secs);
//std::cout << "Matrix Multiply on Host: " << elapsed_secs << std::endl;
//multiply on device
//1. Copy M,N matrices to device
//2. M*N on device
//3. Copy P matrix to host and output
MatrixMultiplyOnDevice(M,N,Q,width,tile_width);
float avg_err = 0;
for(int i=0; i<size; i++)
avg_err += fabs(P[i]-Q[i]);
avg_err /= size;
printf("Average error is: %f\n",avg_err);
//PrintMatrix(M,width);
//PrintMatrix(N,width);
//PrintMatrix(P,width);
//PrintMatrix(Q,width);
return 0;
} |
9a9f88bbcc03fd13980491cfac10f11b25164250.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "star3d4r-32x32-2-256_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 16;
const AN5D_TYPE __side3Len = 16;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_0_5;
float __reg_0_6;
float __reg_0_7;
float __reg_0_8;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_1_5;
float __reg_1_6;
float __reg_1_7;
float __reg_1_8;
__shared__ float __e_sb_double[__blockSize * 2];
float *__e_sb = __e_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g, __h, __i) do { __rn0 = (((((((((((((((((((((((((0.25000f * (__REGREF(__e, 0, 0))) + (0.03228f * (__SBREF(__e_sb, 0, -4)))) + (0.03138f * (__SBREF(__e_sb, 0, -3)))) + (0.03118f * (__SBREF(__e_sb, 0, -2)))) + (0.03027f * (__SBREF(__e_sb, 0, -1)))) + (0.03022f * (__SBREF(__e_sb, 0, 1)))) + (0.03112f * (__SBREF(__e_sb, 0, 2)))) + (0.03132f * (__SBREF(__e_sb, 0, 3)))) + (0.03222f * (__SBREF(__e_sb, 0, 4)))) + (0.03026f * (__REGREF(__d, 0, 0)))) + (0.03024f * (__REGREF(__f, 0, 0)))) + (0.03027f * (__SBREF(__e_sb, -1, 0)))) + (0.03023f * (__SBREF(__e_sb, 1, 0)))) + (0.03116f * (__REGREF(__c, 0, 0)))) + (0.03114f * (__REGREF(__g, 0, 0)))) + (0.03117f * (__SBREF(__e_sb, -2, 0)))) + (0.03113f * (__SBREF(__e_sb, 2, 0)))) + (0.03136f * (__REGREF(__b, 0, 0)))) + (0.03134f * (__REGREF(__h, 0, 0)))) + (0.03137f * (__SBREF(__e_sb, -3, 0)))) + (0.03133f * (__SBREF(__e_sb, 3, 0)))) + (0.03226f * (__REGREF(__a, 0, 0)))) + (0.03224f * (__REGREF(__i, 0, 0)))) + (0.03227f * (__SBREF(__e_sb, -4, 0)))) + (0.03223f * (__SBREF(__e_sb, 4, 0)))); } while (0)
#define __DB_SWITCH() do { __e_sb = &__e_sb_double[(__e_sb == __e_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e, f, g, h, i) do { __DB_SWITCH(); __e_sb[__tid] = e; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); else out = reg4; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); } } while (0)
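// AN5D-style register-streamed 3D star stencil (radius 4): each thread keeps a 9-deep
// register window along c1, the centre plane is staged in double-buffered shared memory
// for the c2/c3 neighbours, and two time steps are fused per sweep (__CALC1 produces the
// intermediate step, __STORE writes the second one).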
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_1_2, 2);
__LOAD(__reg_1_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__CALC1(__reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_5, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_6, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_7, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 12);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__LOAD(__reg_0_4, 13);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(5, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__LOAD(__reg_0_5, 14);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(6, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_6, 15);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(7, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_7, 16);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 12);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, 13);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, 14);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, 15);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__LOAD(__reg_0_7, 16);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
}
__e_sb = __e_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 9;)
{
__LOAD(__reg_0_8, __h);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 8, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 8, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 8, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 8, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__h++;
__LOAD(__reg_0_5, __h);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 8, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_6, __h);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 8, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_7, __h);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(__h - 8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_7);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_7);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_7, __reg_0_8);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_0_8);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_0_8, __reg_0_0);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__LOAD(__reg_0_3, __h + 4);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_0_0);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h + 0, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__LOAD(__reg_0_3, __h + 4);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__LOAD(__reg_0_4, __h + 5);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_0_1);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h + 0, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h + 1, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__LOAD(__reg_0_3, __h + 4);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__LOAD(__reg_0_4, __h + 5);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__LOAD(__reg_0_5, __h + 6);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h + 0, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h + 1, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h + 2, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__LOAD(__reg_0_3, __h + 4);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__LOAD(__reg_0_4, __h + 5);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__LOAD(__reg_0_5, __h + 6);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_6, __h + 7);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h + 1, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h + 2, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h + 3, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
}
}
else
{
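/* Interior tile along c1: no domain boundary to drain against, so the block
 * streams 9 planes per iteration up to the end of the overlapped tile and
 * then retires the remaining planes one at a time, returning once __h
 * reaches __side1LenOl. */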
for (__h = 17; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0_8, __h);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 8, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 8, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 8, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 8, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__h++;
__LOAD(__reg_0_5, __h);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 8, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_6, __h);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 8, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_7, __h);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(__h - 8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_8, __h);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 8, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 8, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 8, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 8, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_5, __h);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 8, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_6, __h);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 8, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_7, __h);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(__h - 8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
}
}
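/*
 * kernel0_1: fallback variant of the AN5D-generated 3D radius-4 star stencil
 * that fuses only a single time step (__side0Len == 1).  Each thread block
 * owns a 24x24 tile in the c2/c3 plane (32x32 including the 4-wide halo) and
 * streams along c1, keeping a rolling window of 9 input planes in registers
 * (__reg_0_0..__reg_0_8); the centre plane is staged in shared memory so the
 * in-plane neighbours can be read.  Each output point is one application of
 * the 25-point stencil, written into the other half of the time-double-buffered
 * array A.
 */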
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_0_5;
float __reg_0_6;
float __reg_0_7;
float __reg_0_8;
__shared__ float __e_sb_double[__blockSize * 2];
float *__e_sb = __e_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g, __h, __i) do { __rn0 = (((((((((((((((((((((((((0.25000f * (__REGREF(__e, 0, 0))) + (0.03228f * (__SBREF(__e_sb, 0, -4)))) + (0.03138f * (__SBREF(__e_sb, 0, -3)))) + (0.03118f * (__SBREF(__e_sb, 0, -2)))) + (0.03027f * (__SBREF(__e_sb, 0, -1)))) + (0.03022f * (__SBREF(__e_sb, 0, 1)))) + (0.03112f * (__SBREF(__e_sb, 0, 2)))) + (0.03132f * (__SBREF(__e_sb, 0, 3)))) + (0.03222f * (__SBREF(__e_sb, 0, 4)))) + (0.03026f * (__REGREF(__d, 0, 0)))) + (0.03024f * (__REGREF(__f, 0, 0)))) + (0.03027f * (__SBREF(__e_sb, -1, 0)))) + (0.03023f * (__SBREF(__e_sb, 1, 0)))) + (0.03116f * (__REGREF(__c, 0, 0)))) + (0.03114f * (__REGREF(__g, 0, 0)))) + (0.03117f * (__SBREF(__e_sb, -2, 0)))) + (0.03113f * (__SBREF(__e_sb, 2, 0)))) + (0.03136f * (__REGREF(__b, 0, 0)))) + (0.03134f * (__REGREF(__h, 0, 0)))) + (0.03137f * (__SBREF(__e_sb, -3, 0)))) + (0.03133f * (__SBREF(__e_sb, 3, 0)))) + (0.03226f * (__REGREF(__a, 0, 0)))) + (0.03224f * (__REGREF(__i, 0, 0)))) + (0.03227f * (__SBREF(__e_sb, -4, 0)))) + (0.03223f * (__SBREF(__e_sb, 4, 0)))); } while (0)
#define __DB_SWITCH() do { __e_sb = &__e_sb_double[(__e_sb == __e_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e, f, g, h, i) do { __DB_SWITCH(); __e_sb[__tid] = e; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); } } while (0)
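/* Prologue: preload the first 2*__halo1 + 1 = 9 planes and emit the first
 * output plane.  With a single fused time step both branches are identical;
 * the split only mirrors the structure of the multi-step kernels, where the
 * first c1 tile needs different handling. */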
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__STORE(4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__STORE(4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
__e_sb = __e_sb_double + __blockSize * 1;
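/* Steady state: the branch below handles the last tile along c1, which must
 * drain the register window against the true end of the domain; the matching
 * else branch handles interior tiles, which stream to the end of the
 * overlapped tile and return as soon as __h reaches __side1LenOl. */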
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 9;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, __h + 6);
__STORE(__h + 2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, __h + 6);
__STORE(__h + 2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__LOAD(__reg_0_7, __h + 7);
__STORE(__h + 3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
}
}
| 9a9f88bbcc03fd13980491cfac10f11b25164250.cu |
#include "star3d4r-32x32-2-256_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
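/*
 * kernel0_2: main variant with two fused time steps (__side0Len == 2).  The
 * c2/c3 tile shrinks to 16x16 (still 32x32 with the doubled 8-wide overlap)
 * so that the redundantly computed halo of the first step stays inside the
 * block.  A second register window (__reg_1_0..__reg_1_8) holds the
 * intermediate first-step planes produced by __CALC1; __STORE applies the
 * stencil a second time to that window and commits the twice-updated value
 * into the other half of the time buffer.
 */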
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 16;
const AN5D_TYPE __side3Len = 16;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_0_5;
float __reg_0_6;
float __reg_0_7;
float __reg_0_8;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_1_5;
float __reg_1_6;
float __reg_1_7;
float __reg_1_8;
__shared__ float __e_sb_double[__blockSize * 2];
float *__e_sb = __e_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g, __h, __i) do { __rn0 = (((((((((((((((((((((((((0.25000f * (__REGREF(__e, 0, 0))) + (0.03228f * (__SBREF(__e_sb, 0, -4)))) + (0.03138f * (__SBREF(__e_sb, 0, -3)))) + (0.03118f * (__SBREF(__e_sb, 0, -2)))) + (0.03027f * (__SBREF(__e_sb, 0, -1)))) + (0.03022f * (__SBREF(__e_sb, 0, 1)))) + (0.03112f * (__SBREF(__e_sb, 0, 2)))) + (0.03132f * (__SBREF(__e_sb, 0, 3)))) + (0.03222f * (__SBREF(__e_sb, 0, 4)))) + (0.03026f * (__REGREF(__d, 0, 0)))) + (0.03024f * (__REGREF(__f, 0, 0)))) + (0.03027f * (__SBREF(__e_sb, -1, 0)))) + (0.03023f * (__SBREF(__e_sb, 1, 0)))) + (0.03116f * (__REGREF(__c, 0, 0)))) + (0.03114f * (__REGREF(__g, 0, 0)))) + (0.03117f * (__SBREF(__e_sb, -2, 0)))) + (0.03113f * (__SBREF(__e_sb, 2, 0)))) + (0.03136f * (__REGREF(__b, 0, 0)))) + (0.03134f * (__REGREF(__h, 0, 0)))) + (0.03137f * (__SBREF(__e_sb, -3, 0)))) + (0.03133f * (__SBREF(__e_sb, 3, 0)))) + (0.03226f * (__REGREF(__a, 0, 0)))) + (0.03224f * (__REGREF(__i, 0, 0)))) + (0.03227f * (__SBREF(__e_sb, -4, 0)))) + (0.03223f * (__SBREF(__e_sb, 4, 0)))); } while (0)
#define __DB_SWITCH() do { __e_sb = &__e_sb_double[(__e_sb == __e_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e, f, g, h, i) do { __DB_SWITCH(); __e_sb[__tid] = e; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); else out = reg4; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); } } while (0)
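/* Two-level pipeline: __reg_0_* hold raw input planes, __reg_1_* hold
 * first-step results.  __CALC1 runs one halo ring in from the block edge
 * (__writeValid1); __STORE runs two rings in (__writeValid2).  In the
 * branches below, the first tile along c1 emits output planes 4..8 while the
 * windows fill; later tiles start at plane 8, since the four planes before
 * that are written by the previous tile along c1. */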
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_1_2, 2);
__LOAD(__reg_1_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__CALC1(__reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_5, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_6, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_7, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 12);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__LOAD(__reg_0_4, 13);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(5, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__LOAD(__reg_0_5, 14);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(6, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_6, 15);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(7, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_7, 16);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__LOAD(__reg_0_0, 9);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, 10);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 11);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 12);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, 13);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, 14);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, 15);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__LOAD(__reg_0_7, 16);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
}
__e_sb = __e_sb_double + __blockSize * 0;
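/* Steady state for the fused kernel: the last tile along c1 drains against
 * the true end of the domain (the long else-if chain), every other tile
 * streams to the end of the overlapped region and returns once __h reaches
 * __side1LenOl. */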
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 9;)
{
__LOAD(__reg_0_8, __h);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 8, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 8, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 8, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 8, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__h++;
__LOAD(__reg_0_5, __h);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 8, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_6, __h);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 8, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_7, __h);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(__h - 8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_7);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_7);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_7, __reg_0_8);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_0_8);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_0_8, __reg_0_0);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__LOAD(__reg_0_3, __h + 4);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_0_0);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h + 0, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__LOAD(__reg_0_3, __h + 4);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__LOAD(__reg_0_4, __h + 5);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_0_1);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h + 0, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h + 1, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__LOAD(__reg_0_3, __h + 4);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__LOAD(__reg_0_4, __h + 5);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__LOAD(__reg_0_5, __h + 6);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h + 0, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h + 1, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h + 2, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_8, __h + 0);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 7, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__LOAD(__reg_0_3, __h + 4);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__LOAD(__reg_0_4, __h + 5);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__LOAD(__reg_0_5, __h + 6);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 2, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_6, __h + 7);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 1, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h + 0, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h + 1, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h + 2, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h + 3, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0_8, __h);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 8, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 8, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 8, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 8, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__h++;
__LOAD(__reg_0_5, __h);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 8, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__h++;
__LOAD(__reg_0_6, __h);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 8, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_7, __h);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(__h - 8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_8, __h);
__CALC1(__reg_1_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__STORE(__h - 8, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_5, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__STORE(__h - 8, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_6, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__STORE(__h - 8, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_7, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 8, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_8, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 8, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_5, __h);
__CALC1(__reg_1_1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__STORE(__h - 8, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_6, __h);
__CALC1(__reg_1_2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__STORE(__h - 8, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_7, __h);
__CALC1(__reg_1_3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__STORE(__h - 8, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_7, __reg_1_8, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
}
}
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 4 - 4);
const AN5D_TYPE __c3Pad = (4);
#define __c3 c3
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
const AN5D_TYPE __halo3 = 4;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0_0;
float __reg_0_1;
float __reg_0_2;
float __reg_0_3;
float __reg_0_4;
float __reg_0_5;
float __reg_0_6;
float __reg_0_7;
float __reg_0_8;
__shared__ float __e_sb_double[__blockSize * 2];
float *__e_sb = __e_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g, __h, __i) do { __rn0 = (((((((((((((((((((((((((0.25000f * (__REGREF(__e, 0, 0))) + (0.03228f * (__SBREF(__e_sb, 0, -4)))) + (0.03138f * (__SBREF(__e_sb, 0, -3)))) + (0.03118f * (__SBREF(__e_sb, 0, -2)))) + (0.03027f * (__SBREF(__e_sb, 0, -1)))) + (0.03022f * (__SBREF(__e_sb, 0, 1)))) + (0.03112f * (__SBREF(__e_sb, 0, 2)))) + (0.03132f * (__SBREF(__e_sb, 0, 3)))) + (0.03222f * (__SBREF(__e_sb, 0, 4)))) + (0.03026f * (__REGREF(__d, 0, 0)))) + (0.03024f * (__REGREF(__f, 0, 0)))) + (0.03027f * (__SBREF(__e_sb, -1, 0)))) + (0.03023f * (__SBREF(__e_sb, 1, 0)))) + (0.03116f * (__REGREF(__c, 0, 0)))) + (0.03114f * (__REGREF(__g, 0, 0)))) + (0.03117f * (__SBREF(__e_sb, -2, 0)))) + (0.03113f * (__SBREF(__e_sb, 2, 0)))) + (0.03136f * (__REGREF(__b, 0, 0)))) + (0.03134f * (__REGREF(__h, 0, 0)))) + (0.03137f * (__SBREF(__e_sb, -3, 0)))) + (0.03133f * (__SBREF(__e_sb, 3, 0)))) + (0.03226f * (__REGREF(__a, 0, 0)))) + (0.03224f * (__REGREF(__i, 0, 0)))) + (0.03227f * (__SBREF(__e_sb, -4, 0)))) + (0.03223f * (__SBREF(__e_sb, 4, 0)))); } while (0)
#define __DB_SWITCH() do { __e_sb = &__e_sb_double[(__e_sb == __e_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e, f, g, h, i) do { __DB_SWITCH(); __e_sb[__tid] = e; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8); } } while (0)
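// Annotation (not part of the generated code): this kernel is AN5D-style machine-generated 3-D
// stencil code. __LOAD pulls one c1-plane of the tile into a register; __STORE first caches the
// centre plane in the double-buffered shared array __e_sb_double (__CALCSETUP / __DB_SWITCH),
// then __CALCEXPR evaluates the radius-4 star stencil from the nine register planes along c1
// plus the +/-4 shared-memory neighbours along c2 and c3, and writes the result into the other
// time buffer of A (__DEST). The control flow below simply streams __h along c1 while rotating
// the nine registers.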
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__STORE(4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__LOAD(__reg_0_5, 5);
__LOAD(__reg_0_6, 6);
__LOAD(__reg_0_7, 7);
__LOAD(__reg_0_8, 8);
__STORE(4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
}
__e_sb = __e_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 9;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, __h + 6);
__STORE(__h + 2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h - 1, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__LOAD(__reg_0_4, __h + 4);
__STORE(__h + 0, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_5, __h + 5);
__STORE(__h + 1, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__LOAD(__reg_0_6, __h + 6);
__STORE(__h + 2, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__LOAD(__reg_0_7, __h + 7);
__STORE(__h + 3, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 9;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 4, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 4, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_5, __h);
__STORE(__h - 4, __reg_0_6, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_6, __h);
__STORE(__h - 4, __reg_0_7, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_7, __h);
__STORE(__h - 4, __reg_0_8, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_8, __h);
__STORE(__h - 4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_7, __reg_0_8);
__h++;
}
}
|
ee1dc3f1b1ac42f44dadc1c3a1b46c8366886c86.hip | // !!! This is a file automatically generated by hipify!!!
#include "Kernel.h"
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#ifdef PARALLEL
int getNumOfBlock(int pointsArrSize, hipDeviceProp_t prop);
void checkErrorStatus(hipError_t e);
__device__ double calcDistanceCuda(double location1[DIMENSIONS], double location2[DIMENSIONS])
{
double powDistanceSum = 0;
int dimensionIndex;
for (dimensionIndex = 0; dimensionIndex < DIMENSIONS; dimensionIndex++)
{
powDistanceSum += pow((location1[dimensionIndex] - location2[dimensionIndex]), 2);
}
double sqrtD = sqrt(powDistanceSum);
return sqrtD;
}
__global__ void updatePointsLocation(Point * points, int numOfPoints, double timeInterval, int numOfThreadsPerBlock)
{
int pointIndex = blockIdx.x*numOfThreadsPerBlock + threadIdx.x;
if (pointIndex < numOfPoints)
{
int axisIndex;
for (axisIndex = 0; axisIndex < DIMENSIONS; axisIndex++)
{
points[pointIndex].location[axisIndex] += timeInterval*points[pointIndex].velocity[axisIndex];
}
}
}
__global__ void groupPoints(Cluster * clusters, int numOfClusters, Point ** points, int numOfPoints, int numOfThreadsPerBlock, bool * pointsMoved)
{
//*pointsMoved = false;
int pointIndex = blockIdx.x*numOfThreadsPerBlock + threadIdx.x;
if (pointIndex < numOfPoints)
{
Point * currentPoint = &((*points)[pointIndex]);
int clusterIndex = 0;
double minDistance = calcDistanceCuda(currentPoint->location, clusters[clusterIndex].center);
int closestClusterIndex = clusterIndex;
for (clusterIndex = 1; clusterIndex < numOfClusters; clusterIndex++)
{
double distance = calcDistanceCuda(currentPoint->location, clusters[clusterIndex].center);
if (distance < minDistance)
{
minDistance = distance;
closestClusterIndex = clusterIndex;
}
}
//update the current cluster
if (currentPoint->currentCluster != closestClusterIndex)
{
currentPoint->currentCluster = closestClusterIndex;
*pointsMoved = true;
}
}
}
Point * allocatePointsOnGpuCuda(Point * points, int numOfPoints)
{
Point * pointsGPU;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
checkErrorStatus(cudaStatus);
// Allocate GPU buffers for points
cudaStatus = hipMalloc((void**)&pointsGPU, numOfPoints * sizeof(Point));
checkErrorStatus(cudaStatus);
//cuda memcpy to GPU
cudaStatus = hipMemcpy(pointsGPU, points, numOfPoints * sizeof(Point), hipMemcpyHostToDevice);
checkErrorStatus(cudaStatus);
return pointsGPU;
}
Cluster * allocateClustersOnGPU(Cluster * clusters)
{
Cluster * clustersGPU;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
checkErrorStatus(cudaStatus);
// Allocate GPU buffers for clusters
cudaStatus = hipMalloc((void**)&clustersGPU, params.numOfClusters * sizeof(Cluster));
checkErrorStatus(cudaStatus);
//cuda memcpy to GPU
cudaStatus = hipMemcpy(clustersGPU, clusters, params.numOfClusters * sizeof(Cluster), hipMemcpyHostToDevice);
checkErrorStatus(cudaStatus);
return clustersGPU;
}
Point * progressPointsLocationCuda(Point * points, int numOfPoints, Point * pointArr_onGPU)
{
hipError_t cudaStatus;
hipDeviceProp_t prop;
int numOfBlocks;
//int pointIndex;
//get device properties
cudaStatus = hipGetDeviceProperties(&prop, 0);
checkErrorStatus(cudaStatus);
numOfBlocks = getNumOfBlock(numOfPoints, prop);
// Choose which GPU to run on
cudaStatus = hipSetDevice(0);
checkErrorStatus(cudaStatus);
updatePointsLocation << <numOfBlocks, prop.maxThreadsPerBlock >> >(pointArr_onGPU, numOfPoints, params.timeInterval, prop.maxThreadsPerBlock);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
checkErrorStatus(cudaStatus);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
checkErrorStatus(cudaStatus);
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(points, pointArr_onGPU, numOfPoints * sizeof(Point), hipMemcpyDeviceToHost);
checkErrorStatus(cudaStatus);
// for (pointIndex = 0; pointIndex < numOfPoints; pointIndex++)
// {
// cudaStatus = hipMemcpy(points->location, pointArr_onGPU->location, sizeof(points->location), hipMemcpyDeviceToHost);
// checkErrorStatus(cudaStatus);
// }
return pointArr_onGPU;
}
// Returns true if any point changed its cluster assignment, false otherwise
bool groupPointsCuda(Cluster * clusters, Point ** pointsOnGPU, int numOfPoints)
{
bool ret = false;
hipError_t cudaStatus;
hipDeviceProp_t prop;
int numOfBlocks = 0;
Cluster* clustersOnGPU;
//get device properties
cudaStatus = hipGetDeviceProperties(&prop, 0);
checkErrorStatus(cudaStatus);
cudaStatus = hipSetDevice(0);
checkErrorStatus(cudaStatus);
clustersOnGPU = allocateClustersOnGPU(clusters);
numOfBlocks = getNumOfBlock(numOfPoints, prop);
printf("numOfthreads : %d , numOfBlocks : %d\n", prop.maxThreadsPerBlock , numOfBlocks);
fflush(stdout);
ret = false;
groupPoints << <numOfBlocks*2, prop.maxThreadsPerBlock/2 >> > (clustersOnGPU, params.numOfClusters, pointsOnGPU, numOfPoints, prop.maxThreadsPerBlock/2, &ret);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
checkErrorStatus(cudaStatus);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
checkErrorStatus(cudaStatus);
//free cuda clusters
hipFree(clustersOnGPU);
return ret;
}
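// Annotation (not part of the original file): groupPointsCuda() above passes the address of a
// host-side bool (&ret) to the kernel, which then writes through it in device code; without
// unified/managed memory that store is not valid and the "points moved" result is unreliable.
// A minimal hedged sketch with a device-resident flag (hypothetical helper name, reusing the
// types and helpers already declared in this file) could look like this:
bool groupPointsCudaDeviceFlag(Cluster * clusters, Point ** pointsOnGPU, int numOfPoints)
{
	bool moved = false;
	bool *movedGPU = NULL;
	hipDeviceProp_t prop;
	checkErrorStatus(hipGetDeviceProperties(&prop, 0));
	Cluster *clustersOnGPU = allocateClustersOnGPU(clusters);
	int numOfBlocks = getNumOfBlock(numOfPoints, prop);
	// Keep the flag in device memory instead of pointing the kernel at host stack memory
	checkErrorStatus(hipMalloc((void**)&movedGPU, sizeof(bool)));
	checkErrorStatus(hipMemcpy(movedGPU, &moved, sizeof(bool), hipMemcpyHostToDevice));
	groupPoints << <numOfBlocks, prop.maxThreadsPerBlock >> > (clustersOnGPU, params.numOfClusters, pointsOnGPU, numOfPoints, prop.maxThreadsPerBlock, movedGPU);
	checkErrorStatus(hipGetLastError());
	checkErrorStatus(hipDeviceSynchronize());
	// Copy the result back before releasing the temporaries
	checkErrorStatus(hipMemcpy(&moved, movedGPU, sizeof(bool), hipMemcpyDeviceToHost));
	hipFree(movedGPU);
	hipFree(clustersOnGPU);
	return moved;
}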
int getNumOfBlock(int numOfPoints, hipDeviceProp_t prop)
{
int numOfBlocks = numOfPoints / prop.maxThreadsPerBlock;
if (numOfPoints % prop.maxThreadsPerBlock)
{
numOfBlocks++;
}
return numOfBlocks;
}
void checkErrorStatus(hipError_t e)
{
// Check that the CUDA call completed successfully
if (e != hipSuccess)
{
printf("Cuda Error: %d\n", e);
fflush(stdout);
exit(1);
}
}
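// Annotation (not part of the original file): the helpers above are meant to be driven from the
// host roughly as follows (an assumption; the real driver lives outside this file): allocate the
// points once with allocatePointsOnGpuCuda(), advance them each time step with
// progressPointsLocationCuda(), then call groupPointsCuda() repeatedly, recomputing cluster
// centers on the host in between, until it reports that no point changed cluster.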
#endif
| ee1dc3f1b1ac42f44dadc1c3a1b46c8366886c86.cu | #include "Kernel.h"
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#ifdef PARALLEL
int getNumOfBlock(int pointsArrSize, cudaDeviceProp prop);
void checkErrorStatus(cudaError e);
__device__ double calcDistanceCuda(double location1[DIMENSIONS], double location2[DIMENSIONS])
{
double powDistanceSum = 0;
int dimensionIndex;
for (dimensionIndex = 0; dimensionIndex < DIMENSIONS; dimensionIndex++)
{
powDistanceSum += pow((location1[dimensionIndex] - location2[dimensionIndex]), 2);
}
double sqrtD = sqrt(powDistanceSum);
return sqrtD;
}
__global__ void updatePointsLocation(Point * points, int numOfPoints, double timeInterval, int numOfThreadsPerBlock)
{
int pointIndex = blockIdx.x*numOfThreadsPerBlock + threadIdx.x;
if (pointIndex < numOfPoints)
{
int axisIndex;
for (axisIndex = 0; axisIndex < DIMENSIONS; axisIndex++)
{
points[pointIndex].location[axisIndex] += timeInterval*points[pointIndex].velocity[axisIndex];
}
}
}
__global__ void groupPoints(Cluster * clusters, int numOfClusters, Point ** points, int numOfPoints, int numOfThreadsPerBlock, bool * pointsMoved)
{
//*pointsMoved = false;
int pointIndex = blockIdx.x*numOfThreadsPerBlock + threadIdx.x;
if (pointIndex < numOfPoints)
{
Point * currentPoint = &((*points)[pointIndex]);
int clusterIndex = 0;
double minDistance = calcDistanceCuda(currentPoint->location, clusters[clusterIndex].center);
int closestClusterIndex = clusterIndex;
for (clusterIndex = 1; clusterIndex < numOfClusters; clusterIndex++)
{
double distance = calcDistanceCuda(currentPoint->location, clusters[clusterIndex].center);
if (distance < minDistance)
{
minDistance = distance;
closestClusterIndex = clusterIndex;
}
}
//update the current cluster
if (currentPoint->currentCluster != closestClusterIndex)
{
currentPoint->currentCluster = closestClusterIndex;
*pointsMoved = true;
}
}
}
Point * allocatePointsOnGpuCuda(Point * points, int numOfPoints)
{
Point * pointsGPU;
cudaError cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
checkErrorStatus(cudaStatus);
// Allocate GPU buffers for points
cudaStatus = cudaMalloc((void**)&pointsGPU, numOfPoints * sizeof(Point));
checkErrorStatus(cudaStatus);
//cuda memcpy to GPU
cudaStatus = cudaMemcpy(pointsGPU, points, numOfPoints * sizeof(Point), cudaMemcpyHostToDevice);
checkErrorStatus(cudaStatus);
return pointsGPU;
}
Cluster * allocateClustersOnGPU(Cluster * clusters)
{
Cluster * clustersGPU;
cudaError cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
checkErrorStatus(cudaStatus);
// Allocate GPU buffers for clusters
cudaStatus = cudaMalloc((void**)&clustersGPU, params.numOfClusters * sizeof(Cluster));
checkErrorStatus(cudaStatus);
//cuda memcpy to GPU
cudaStatus = cudaMemcpy(clustersGPU, clusters, params.numOfClusters * sizeof(Cluster), cudaMemcpyHostToDevice);
checkErrorStatus(cudaStatus);
return clustersGPU;
}
Point * progressPointsLocationCuda(Point * points, int numOfPoints, Point * pointArr_onGPU)
{
cudaError_t cudaStatus;
cudaDeviceProp prop;
int numOfBlocks;
//int pointIndex;
//get device properties
cudaStatus = cudaGetDeviceProperties(&prop, 0);
checkErrorStatus(cudaStatus);
numOfBlocks = getNumOfBlock(numOfPoints, prop);
// Choose which GPU to run on
cudaStatus = cudaSetDevice(0);
checkErrorStatus(cudaStatus);
updatePointsLocation << <numOfBlocks, prop.maxThreadsPerBlock >> >(pointArr_onGPU, numOfPoints, params.timeInterval, prop.maxThreadsPerBlock);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
checkErrorStatus(cudaStatus);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
checkErrorStatus(cudaStatus);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(points, pointArr_onGPU, numOfPoints * sizeof(Point), cudaMemcpyDeviceToHost);
checkErrorStatus(cudaStatus);
// for (pointIndex = 0; pointIndex < numOfPoints; pointIndex++)
// {
// cudaStatus = cudaMemcpy(points->location, pointArr_onGPU->location, sizeof(points->location), cudaMemcpyDeviceToHost);
// checkErrorStatus(cudaStatus);
// }
return pointArr_onGPU;
}
// Returns true if any point changed its cluster assignment, false otherwise
bool groupPointsCuda(Cluster * clusters, Point ** pointsOnGPU, int numOfPoints)
{
bool ret = false;
cudaError_t cudaStatus;
cudaDeviceProp prop;
int numOfBlocks = 0;
Cluster* clustersOnGPU;
//get device properties
cudaStatus = cudaGetDeviceProperties(&prop, 0);
checkErrorStatus(cudaStatus);
cudaStatus = cudaSetDevice(0);
checkErrorStatus(cudaStatus);
clustersOnGPU = allocateClustersOnGPU(clusters);
numOfBlocks = getNumOfBlock(numOfPoints, prop);
printf("numOfthreads : %d , numOfBlocks : %d\n", prop.maxThreadsPerBlock , numOfBlocks);
fflush(stdout);
ret = false;
groupPoints << <numOfBlocks*2, prop.maxThreadsPerBlock/2 >> > (clustersOnGPU, params.numOfClusters, pointsOnGPU, numOfPoints, prop.maxThreadsPerBlock/2, &ret);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
checkErrorStatus(cudaStatus);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
checkErrorStatus(cudaStatus);
//free cuda clusters
cudaFree(clustersOnGPU);
return ret;
}
int getNumOfBlock(int numOfPoints, cudaDeviceProp prop)
{
int numOfBlocks = numOfPoints / prop.maxThreadsPerBlock;
if (numOfPoints % prop.maxThreadsPerBlock)
{
numOfBlocks++;
}
return numOfBlocks;
}
void checkErrorStatus(cudaError e)
{
// Check that the CUDA call completed successfully
if (e != cudaSuccess)
{
printf("Cuda Error: %d\n", e);
fflush(stdout);
exit(1);
}
}
#endif
|
283b05a2896ed5a3fbc73f3a5355fcef2fee82cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
#define divup(a, b) ((a) + (b) - 1) / (b)
const int THREADS_PER_BLOCK = 256;
const int THREADS_X = 32;
const int THREADS_Y = THREADS_PER_BLOCK / THREADS_X;
const int REPEAT = 32;
const int64_t NNZ_PER_BLOCK_MAX = 1024;
/* sign MACRO */
#ifndef clamp
#define clamp(a, low, high) max(min((a), (high)), (low))
#endif
#ifndef ATOMIC_REAL_MINMAX
#define ATOMIC_REAL_MINMAX(func) \
__device__ void atomic_##func(double *address, double val) { \
uint64_t* address_as_ull = (uint64_t*)address; \
uint64_t old = *address_as_ull; \
uint64_t assumed; \
do { \
assumed = old; \
old = atomicCAS((uint64_t *) address_as_ull, (uint64_t) assumed, \
(uint64_t) __double_as_longlong(func(val, __longlong_as_double(assumed)))); \
} while (assumed != old); \
} \
__device__ void atomic_##func(float *address, float val) { \
int* address_as_int = (int*)address; \
int old = *address_as_int; \
int assumed; \
do { \
assumed = old; \
old = atomicCAS(address_as_int, assumed, \
__float_as_int(func(val, __int_as_float(assumed)))); \
} while (assumed != old); \
}
ATOMIC_REAL_MINMAX(max)
ATOMIC_REAL_MINMAX(min)
#endif
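// Annotation (not part of the original file): the macro above implements atomic floating-point
// min/max by reinterpreting the value's bits as an integer word and retrying atomicCAS until no
// other thread has modified that word in between; this compare-and-swap loop is used because
// CUDA/HIP do not expose native atomicMax/atomicMin overloads for float or double.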
template<typename Ty, bool train>
__global__ static
void updateOutput(
Ty *output,
Ty *normalizedValues,
const Ty *values,
const int64_t *cumSumSizes,
const int64_t *keys,
const int64_t batchSize,
const int64_t outDim,
Ty *weight,
const Ty *bias,
const int64_t weightStride,
const int64_t keysOffset,
const int maxNormalize,
const int nnzPerBlock)
{
/*******************************************************
* Adapted from the following file in arrayfire
* https://github.com/arrayfire/arrayfire/blob/v3.4.1/src/backend/opencl/kernel/csrmm.cl
*
*******************************************************
* Original copyright notice can be seen below:
*
* Copyright (c) 2016, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
const int64_t tidx = threadIdx.x;
const int64_t tidy = threadIdx.y;
const int64_t tid = tidy * blockDim.x + tidx;
const int64_t gidx = blockIdx.x * blockDim.x + tidx;
Ty *nWeight = weight;
// Offset the number of elements specified by maxNormalize
weight += gidx + maxNormalize;
output += gidx;
bool within_N = (gidx < outDim);
__shared__ Ty s_values[THREADS_PER_BLOCK];
__shared__ int64_t s_keys[THREADS_PER_BLOCK];
const int64_t rowId = blockIdx.y;
// if (rowId >= batchSize) return;
// Load the nonzero column offsets for current row
const int64_t batchStart = (rowId == 0 ? 0 : cumSumSizes[rowId - 1]) + blockIdx.z * nnzPerBlock;
const int64_t batchEnd = min(batchStart + nnzPerBlock, cumSumSizes[rowId]);
const int64_t batchStride = blockDim.x * blockDim.y;
Ty outVal = 0;
// Since the number of nonzero elements might be greater than the shared memory available,
// load only part of the row into shared memory, perform a partial dot product, and repeat until done.
for (int64_t id = batchStart; id < batchEnd; id += batchStride) {
// Load the current chunk of the row into local memory
int64_t lim = min(batchEnd - id, (int64_t)batchStride);
int64_t key = tid < lim ? keys[id + tid] + keysOffset : -1;
Ty val = tid < lim ? values[id + tid] : 0;
int64_t nWeightOffset = key * weightStride;
if (tid < lim && maxNormalize) {
Ty *nWeightCurr = nWeight + nWeightOffset;
if (train) {
Ty absVal = fabs(val);
Ty maxVal = nWeight[key * weightStride + 0];
if (absVal > maxVal) {
// Updating maxVal and invMaxVal. Go hogwild!
atomic_max(nWeightCurr + 0, absVal);
atomic_min(nWeightCurr + 1, 1.0/absVal);
}
val = val * nWeightCurr[1] + nWeightCurr[3];
normalizedValues[id + tid] = val;
} else {
val = clamp(val * nWeightCurr[1], -1.0, 1.0) + nWeightCurr[3];
}
}
s_keys[tid] = key;
s_values[tid] = val;
__syncthreads();
// Perform a single "dot" operation for each thread
for (int64_t idy = tidy; within_N && idy < lim; idy += blockDim.y) {
outVal += s_values[idy] * weight[weightStride * s_keys[idy]];
}
__syncthreads();
}
// s_values is no longer used at this point. Reuse it for reducing outVal.
// A reduction along the y dimension now gives a single output value along x.
s_values[tid] = outVal;
for (int64_t y = blockDim.y / 2; y >= 1; y /= 2) {
__syncthreads();
if (tidy < y) s_values[tid] = s_values[tid] + s_values[tid + y * blockDim.x];
}
if (within_N && tidy == 0) {
Ty val = s_values[tid] + (blockIdx.z == 0 ? bias[gidx] : 0);
if (gridDim.z == 1) {
output[rowId * outDim] = val;
} else {
atomicAdd(output + rowId * outDim, val);
}
}
}
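// Annotation (not part of the original kernel): the host-side launch for updateOutput lives in
// generic/IndexLinear.cu and is not shown here; from the indexing above it is assumed to look
// roughly like
//   dim3 block(THREADS_X, THREADS_Y);                      // 32 x 8 threads
//   dim3 grid(divup(outDim, THREADS_X),                    // blockIdx.x tiles the output dim
//             batchSize,                                   // blockIdx.y picks the batch row
//             divup(maxNnzPerRow, NNZ_PER_BLOCK_MAX));     // blockIdx.z splits long sparse rows
// (maxNnzPerRow is a hypothetical name for the longest row). When gridDim.z > 1 the partial sums
// are combined with atomicAdd, so output must be zero-initialized before the launch; the bias is
// only added by the blockIdx.z == 0 slice.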
// This kernel takes in the following inputs:
// values of size [keysSize x 1] and gradOutput of size [batchSize x outDim],
// to generate gradWeight of size [keysSize x outDim]
// nth block along y dimension computes on the non zero elements from the nth batch.
template<typename Ty>
__global__ static
void accGradWeight(
Ty *gradWeight,
const Ty *gradOutput,
const Ty *values,
const int64_t *cumSumSizes,
const int64_t outDim,
const int64_t gradWeightStride,
const Ty scale,
const Ty weightDecay,
const int maxNormalize)
{
const int64_t bidy = blockIdx.y;
const int64_t tidx = threadIdx.x;
const int64_t tidy = threadIdx.y;
const int64_t tid = tidy * blockDim.x + tidx;
const int64_t ntid = blockDim.x * blockDim.y;
const int64_t gidx = blockIdx.x * blockDim.x + tidx;
// All the y threads in the block will use the same gradOutput value
gradOutput += bidy * outDim;
Ty gradOutVal = scale * (gidx < outDim ? gradOutput[gidx] : 0);
// Calculate the amount of work for the current block / batch.
const int64_t batchStart = bidy == 0 ? 0 : cumSumSizes[bidy - 1];
const int64_t batchEnd = cumSumSizes[bidy];
const int64_t batchLimit = batchEnd - batchStart;
// Number of iterations required to finish the work for the current batch.
const int64_t iters = divup(batchLimit, ntid);
// Offset the values to the current batch.
values += batchStart;
// When maxNormalize is enabled, gradWeight will be twice the size.
// The first half will contain the gradients required for maxNormalization.
// The second half will contain the gradients required for updating weights.
// if maxNormalize is false, both will evaluate to the same pointer.
Ty *gradWeight0 = gradWeight + batchStart * gradWeightStride + gidx;
Ty *gradWeight1 = gradWeight0 + (maxNormalize ? outDim : 0);
__shared__ Ty s_values[THREADS_PER_BLOCK];
// Using iters to avoid divergence around __syncthreads()
for (int64_t n = 0; n < iters; n++) {
int64_t off = n * ntid;
int64_t id = off + tid;
int64_t lim = min(ntid, batchLimit - off);
// Read the values required for the current iteration.
s_values[tid] = id < batchLimit ? values[id] : 0;
__syncthreads();
if (gidx < outDim) {
if (maxNormalize) {
for (int64_t idy = tidy; idy < lim; idy += blockDim.y) {
// gradOutVal is already scaled
gradWeight0[(off + idy) * gradWeightStride] = gradOutVal;
}
}
for (int64_t idy = tidy; idy < lim; idy += blockDim.y) {
gradWeight1[(off + idy) * gradWeightStride] = s_values[idy] * gradOutVal;
}
}
__syncthreads();
}
}
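// Annotation (not part of the original kernel): with maxNormalize enabled, each nonzero id of a
// batch row gets two stretches of its gradWeight row (stride gradWeightStride >= 2*outDim):
//   gradWeight[id*gradWeightStride + j]          = scale * gradOutput[j]              (normalization part)
//   gradWeight[id*gradWeightStride + outDim + j] = scale * gradOutput[j] * values[id] (weight part)
// With maxNormalize off, gradWeight0 and gradWeight1 coincide and only the second form is written.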
// The gradBias is just a reduction of gradOutput along the batches.
// There is only one block along y dimension performing the reduction.
template<typename Ty, bool update>
__global__ static
void accGradBias(
Ty *buffer,
const Ty *gradOutput,
const int64_t outDim,
const int64_t batchSize,
const Ty scale,
const Ty weightDecay)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int tid = tidy * blockDim.x + tidx;
const int64_t idx = blockIdx.x * blockDim.x + tidx;
Ty gradBiasVal = 0;
gradOutput += idx;
__shared__ Ty s_gradBiasVals[THREADS_PER_BLOCK];
// Each thread along y calculates the partial sum.
if (idx < outDim) {
for (int64_t idy = tidy; idy < batchSize; idy += blockDim.y) {
gradBiasVal += gradOutput[idy * outDim];
}
}
s_gradBiasVals[tid] = gradBiasVal * scale;
__syncthreads();
// The reduction is performed along y.
for (int y = blockDim.y / 2; y >= 1; y /= 2) {
if (tidy < y) {
s_gradBiasVals[tid] += s_gradBiasVals[tid + y * blockDim.x];
}
__syncthreads();
}
// Write the output only from the first lane.
if (tidy == 0 && idx < outDim) {
if (update) {
// If performing inplace update, subtract from bias.
Ty *bias = buffer;
bias[idx] = (bias[idx] - s_gradBiasVals[tid]);
} else {
// If just accumulating gradients, write to gradBias.
Ty *gradBias = buffer;
gradBias[idx] = s_gradBiasVals[tid];
}
}
}
// Use gradWeight from accGradWeight to update the weight.
// This kernel is launched batchSize number of times.
// At each step in the iteration, the weights are updated in a sparse manner.
template<typename Ty>
__global__ static
void updateWeight(
Ty *weight,
const Ty *gradWeight,
const int64_t *keys,
const int64_t *cumSumSizes,
const int64_t outDim,
const int64_t gradWeightStride,
const int64_t weightStride,
const int64_t keysOffset,
const Ty learningRate,
const Ty weightDecay,
const int maxNormalize,
const int64_t batchId)
{
int64_t gidx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t gidy = blockIdx.y * blockDim.y + threadIdx.y;
// Find the limits of the work to be done
const int64_t batchStart = batchId == 0 ? 0 : cumSumSizes[batchId - 1];
const int64_t batchEnd = cumSumSizes[batchId];
// When maxNormalize is turned on, the weight tensor will contain
// an extra "maxNormalize" number of terms per output at the beginning.
// When maxNormalize is false, both will evaluate to same pointer.
// when maxNormalize is true,
// - nWeight[2] will contain the individual scaling factor.
// - nWeight[3] will contain the individual bias for the normalized input.
Ty *nWeight = weight;
weight += maxNormalize + gidx;
// When maxNormalize is enabled, gradWeight will be twice the size.
// The first half will contain the gradients required for maxNormalization.
// The second half will contain the gradients required for updating weights.
// if maxNormalize is false, both will evaluate to the same pointer.
const Ty *gradWeight0 = gradWeight + gidx;
const Ty *gradWeight1 = gradWeight0 + (maxNormalize ? outDim : 0);
if (gidx >= outDim) return;
for (int64_t id = batchStart + gidy; id < batchEnd; id += blockDim.y * gridDim.y) {
Ty lr = learningRate;
Ty wd = weightDecay;
int64_t weightOffset = (keys[id] + keysOffset) * weightStride;
Ty weightVal = weight[weightOffset];
if (maxNormalize) {
Ty scale = nWeight[weightOffset + 2];
lr *= scale;
wd *= scale;
// nWeight[3] needs to be updated in the following manner for a given input.
// nWeight[3] = nWeight[3] - sum(gradWeight0[gidx] * weight[gidx]);
// Since problem is parallelized along gidx, use atomicAdd for the update.
Ty gradNormBias = lr * weightVal * gradWeight0[id * gradWeightStride];
atomicAdd(nWeight + weightOffset + 3, -gradNormBias);
}
// Perform the regular update
Ty gradWeightVal = lr * gradWeight1[id * gradWeightStride];
if (weightDecay == 0) {
weight[weightOffset] = weightVal - gradWeightVal;
} else {
weight[weightOffset] = weightVal * (1 - wd) - gradWeightVal;
}
}
}
// This kernel is launched batchSize number of times.
// At each step in the iteration, the weights are updated in place in a sparse manner.
template<typename Ty>
__global__ static
void accUpdateWeight(
Ty *weight,
const int64_t weightStride,
const Ty *gradOutput,
const int64_t outDim,
const Ty *values,
const int64_t *cumSumSizes,
const int64_t *keys,
const int64_t keysOffset,
const Ty scale,
const Ty weightDecay,
const int maxNormalize,
const int64_t batchId)
{
// Parallel along outDim.
int64_t gidx = blockIdx.x * blockDim.x + threadIdx.x;
// Parallel along the sparse input size for current batch.
int64_t gidy = blockIdx.y * blockDim.y + threadIdx.y;
if (gidx >= outDim) return;
// Find the limits of the work to be done.
const int64_t batchStart = batchId == 0 ? 0 : cumSumSizes[batchId - 1];
const int64_t batchEnd = cumSumSizes[batchId];
gradOutput += batchId * outDim;
Ty gradOutVal = scale * (gidx < outDim ? gradOutput[gidx] : 0);
// When maxNormalize is turned on, the weight tensor will contain
// an extra "maxNormalize" number of terms per output at the beginning.
// When maxNormalize is false, both will evaluate to same pointer.
// when maxNormalize is true,
// - nWeight[2] will contain the individual scaling factor.
// - nWeight[3] will contain the individual bias for the normalized input.
Ty *nWeight = weight;
weight += maxNormalize + gidx;
for (int64_t id = batchStart + gidy; id < batchEnd; id += blockDim.y * gridDim.y) {
Ty wd = weightDecay;
int64_t weightOffset = (keys[id] + keysOffset) * weightStride;
Ty gradWeightVal = gradOutVal * values[id];
Ty weightVal = weight[weightOffset];
if (maxNormalize) {
Ty nScale = nWeight[weightOffset + 2];
gradWeightVal *= nScale;
wd *= nScale;
// nWeight[3] needs to be updated in the following manner for a given input.
// nWeight[3] = nWeight[3] - sum(gradOut[gidx] * weight[gidx]);
// Since problem is parallelized along gidx, use atomicAdd for the update.
Ty gradNormBias = nScale * weightVal * gradOutVal;
atomicAdd(nWeight + weightOffset + 3, -gradNormBias);
}
// Perform the regular update
if (weightDecay == 0) {
weight[weightOffset] = weightVal - gradWeightVal;
} else {
weight[weightOffset] = weightVal * (1 - wd) - gradWeightVal;
}
}
}
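// Annotation (not part of the original kernel): accUpdateWeight is the fused
// accUpdateGradParameters path: it applies gradOutput * values directly to the weights,
// one launch per batch sample, without materializing the intermediate gradWeight buffer
// that the accGradWeight / updateWeight pair uses.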
#ifdef CUDA_HALF_TENSOR
void THNN_CudaHalfIndexLinear_updateOutput(
THCState *state,
THCudaLongTensor *keys,
int64_t keysOffset,
THCudaHalfTensor *values,
THCudaLongTensor *sizes,
THCudaLongTensor *cumSumSizes,
THCudaHalfTensor *output,
THCudaHalfTensor *weight,
THCudaHalfTensor *bias,
THCudaHalfTensor *normalizedValues,
int train) {
THError("THCudaHalfTensor not supported with IndexLinear");
}
void THNN_CudaHalfIndexLinear_accGradParameters(
THCState *state,
THCudaLongTensor *keys,
int64_t keysOffset,
THCudaHalfTensor *values,
THCudaLongTensor *sizes,
THCudaLongTensor *cumSumSizes,
THCudaHalfTensor *gradOutput,
THCudaHalfTensor *gradWeight,
THCudaHalfTensor *gradBias,
THCudaHalfTensor *weight,
THCudaHalfTensor *bias,
THCudaHalfTensor* valuesBuffer,
float weightDecay,
float scale) {
THError("THCudaHalfTensor not supported with IndexLinear");
}
void THNN_CudaHalfIndexLinear_accUpdateGradParameters(
THCState *state,
THCudaLongTensor *keys,
int64_t keysOffset,
THCudaHalfTensor *values,
THCudaLongTensor *sizes,
THCudaLongTensor *cumSumSizes,
THCudaHalfTensor *gradOutput,
THCudaHalfTensor *weight,
THCudaHalfTensor *bias,
float weightDecay,
float scale) {
THError("THCudaHalfTensor not supported with IndexLinear");
}
void THNN_CudaHalfIndexLinear_updateParameters(
THCState *state,
THCudaHalfTensor *gradWeight,
THCudaHalfTensor *gradBias,
THCudaHalfTensor *weight,
THCudaHalfTensor *bias,
THCudaLongTensor *runningKeys,
THCudaLongTensor *cumSumSizes,
int64_t keysOffset,
float weightDecay,
float learningRate) {
THError("THCudaHalfTensor not supported with IndexLinear");
}
#endif
#include "generic/IndexLinear.cu"
#include "THHGenerateFloatType.h"
#include "generic/IndexLinear.cu"
#include "THHGenerateDoubleType.h"
| 283b05a2896ed5a3fbc73f3a5355fcef2fee82cc.cu | #include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
#define divup(a, b) ((a) + (b) - 1) / (b)
const int THREADS_PER_BLOCK = 256;
const int THREADS_X = 32;
const int THREADS_Y = THREADS_PER_BLOCK / THREADS_X;
const int REPEAT = 32;
const int64_t NNZ_PER_BLOCK_MAX = 1024;
/* sign MACRO */
#ifndef clamp
#define clamp(a, low, high) max(min((a), (high)), (low))
#endif
#ifndef ATOMIC_REAL_MINMAX
#define ATOMIC_REAL_MINMAX(func) \
__device__ void atomic_##func(double *address, double val) { \
uint64_t* address_as_ull = (uint64_t*)address; \
uint64_t old = *address_as_ull; \
uint64_t assumed; \
do { \
assumed = old; \
old = atomicCAS((uint64_t *) address_as_ull, (uint64_t) assumed, \
(uint64_t) __double_as_longlong(func(val, __longlong_as_double(assumed)))); \
} while (assumed != old); \
} \
__device__ void atomic_##func(float *address, float val) { \
int* address_as_int = (int*)address; \
int old = *address_as_int; \
int assumed; \
do { \
assumed = old; \
old = atomicCAS(address_as_int, assumed, \
__float_as_int(func(val, __int_as_float(assumed)))); \
} while (assumed != old); \
}
ATOMIC_REAL_MINMAX(max)
ATOMIC_REAL_MINMAX(min)
#endif
template<typename Ty, bool train>
__global__ static
void updateOutput(
Ty *output,
Ty *normalizedValues,
const Ty *values,
const int64_t *cumSumSizes,
const int64_t *keys,
const int64_t batchSize,
const int64_t outDim,
Ty *weight,
const Ty *bias,
const int64_t weightStride,
const int64_t keysOffset,
const int maxNormalize,
const int nnzPerBlock)
{
/*******************************************************
* Adapted from the following file in arrayfire
* https://github.com/arrayfire/arrayfire/blob/v3.4.1/src/backend/opencl/kernel/csrmm.cl
*
*******************************************************
* Original copyright notice can be seen below:
*
* Copyright (c) 2016, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
const int64_t tidx = threadIdx.x;
const int64_t tidy = threadIdx.y;
const int64_t tid = tidy * blockDim.x + tidx;
const int64_t gidx = blockIdx.x * blockDim.x + tidx;
Ty *nWeight = weight;
// Offset the number of elements specified by maxNormalize
weight += gidx + maxNormalize;
output += gidx;
bool within_N = (gidx < outDim);
__shared__ Ty s_values[THREADS_PER_BLOCK];
__shared__ int64_t s_keys[THREADS_PER_BLOCK];
const int64_t rowId = blockIdx.y;
// if (rowId >= batchSize) return;
// Load the nonzero column offsets for current row
const int64_t batchStart = (rowId == 0 ? 0 : cumSumSizes[rowId - 1]) + blockIdx.z * nnzPerBlock;
const int64_t batchEnd = min(batchStart + nnzPerBlock, cumSumSizes[rowId]);
const int64_t batchStride = blockDim.x * blockDim.y;
Ty outVal = 0;
// Since the number of nonzero elements might be greater than the shared memory available,
// load only part of the row into shared memory, perform a partial dot product, and repeat until done.
for (int64_t id = batchStart; id < batchEnd; id += batchStride) {
// Load the current chunk of the row into local memory
int64_t lim = min(batchEnd - id, (int64_t)batchStride);
int64_t key = tid < lim ? keys[id + tid] + keysOffset : -1;
Ty val = tid < lim ? values[id + tid] : 0;
int64_t nWeightOffset = key * weightStride;
if (tid < lim && maxNormalize) {
Ty *nWeightCurr = nWeight + nWeightOffset;
if (train) {
Ty absVal = fabs(val);
Ty maxVal = nWeight[key * weightStride + 0];
if (absVal > maxVal) {
// Updating maxVal and invMaxVal. Go hogwild!
atomic_max(nWeightCurr + 0, absVal);
atomic_min(nWeightCurr + 1, 1.0/absVal);
}
val = val * nWeightCurr[1] + nWeightCurr[3];
normalizedValues[id + tid] = val;
} else {
val = clamp(val * nWeightCurr[1], -1.0, 1.0) + nWeightCurr[3];
}
}
s_keys[tid] = key;
s_values[tid] = val;
__syncthreads();
// Perform a single "dot" operation for each thread
for (int64_t idy = tidy; within_N && idy < lim; idy += blockDim.y) {
outVal += s_values[idy] * weight[weightStride * s_keys[idy]];
}
__syncthreads();
}
// s_values is no longer used at this point. Reuse it for reducing outVal.
// A reduction along the y dimension now gives a single output value along x.
s_values[tid] = outVal;
for (int64_t y = blockDim.y / 2; y >= 1; y /= 2) {
__syncthreads();
if (tidy < y) s_values[tid] = s_values[tid] + s_values[tid + y * blockDim.x];
}
if (within_N && tidy == 0) {
Ty val = s_values[tid] + (blockIdx.z == 0 ? bias[gidx] : 0);
if (gridDim.z == 1) {
output[rowId * outDim] = val;
} else {
atomicAdd(output + rowId * outDim, val);
}
}
}
// This kernel takes in the following inputs:
// values of size [keysSize x 1] and gradOutput of size [batchSize x outDim],
// to generate gradWeight of size [keysSize x outDim]
// nth block along y dimension computes on the non zero elements from the nth batch.
template<typename Ty>
__global__ static
void accGradWeight(
Ty *gradWeight,
const Ty *gradOutput,
const Ty *values,
const int64_t *cumSumSizes,
const int64_t outDim,
const int64_t gradWeightStride,
const Ty scale,
const Ty weightDecay,
const int maxNormalize)
{
const int64_t bidy = blockIdx.y;
const int64_t tidx = threadIdx.x;
const int64_t tidy = threadIdx.y;
const int64_t tid = tidy * blockDim.x + tidx;
const int64_t ntid = blockDim.x * blockDim.y;
const int64_t gidx = blockIdx.x * blockDim.x + tidx;
// All the y threads in the block will use the same gradOutput value
gradOutput += bidy * outDim;
Ty gradOutVal = scale * (gidx < outDim ? gradOutput[gidx] : 0);
// Calculate the amount of work for the current block / batch.
const int64_t batchStart = bidy == 0 ? 0 : cumSumSizes[bidy - 1];
const int64_t batchEnd = cumSumSizes[bidy];
const int64_t batchLimit = batchEnd - batchStart;
// Number of iterations required to finish the work for the current batch.
const int64_t iters = divup(batchLimit, ntid);
// Offset the values to the current batch.
values += batchStart;
// When maxNormalize is enabled, gradWeight will be twice the size.
// The first half will contain the gradients required for maxNormalization.
// The second half will contain the gradients required for updating weights.
// if maxNormalize is false, both will evaluate to the same pointer.
Ty *gradWeight0 = gradWeight + batchStart * gradWeightStride + gidx;
Ty *gradWeight1 = gradWeight0 + (maxNormalize ? outDim : 0);
__shared__ Ty s_values[THREADS_PER_BLOCK];
// Using iters to avoid divergence around __syncthreads()
for (int64_t n = 0; n < iters; n++) {
int64_t off = n * ntid;
int64_t id = off + tid;
int64_t lim = min(ntid, batchLimit - off);
// Read the values required for the current iteration.
s_values[tid] = id < batchLimit ? values[id] : 0;
__syncthreads();
if (gidx < outDim) {
if (maxNormalize) {
for (int64_t idy = tidy; idy < lim; idy += blockDim.y) {
// gradOutVal is already scaled
gradWeight0[(off + idy) * gradWeightStride] = gradOutVal;
}
}
for (int64_t idy = tidy; idy < lim; idy += blockDim.y) {
gradWeight1[(off + idy) * gradWeightStride] = s_values[idy] * gradOutVal;
}
}
__syncthreads();
}
}
// The gradBias is just a reduction of gradOutput along the batches.
// There is only one block along y dimension performing the reduction.
template<typename Ty, bool update>
__global__ static
void accGradBias(
Ty *buffer,
const Ty *gradOutput,
const int64_t outDim,
const int64_t batchSize,
const Ty scale,
const Ty weightDecay)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int tid = tidy * blockDim.x + tidx;
const int64_t idx = blockIdx.x * blockDim.x + tidx;
Ty gradBiasVal = 0;
gradOutput += idx;
__shared__ Ty s_gradBiasVals[THREADS_PER_BLOCK];
// Each thread along y calculates the partial sum.
if (idx < outDim) {
for (int64_t idy = tidy; idy < batchSize; idy += blockDim.y) {
gradBiasVal += gradOutput[idy * outDim];
}
}
s_gradBiasVals[tid] = gradBiasVal * scale;
__syncthreads();
// The reduction is performed along y.
for (int y = blockDim.y / 2; y >= 1; y /= 2) {
if (tidy < y) {
s_gradBiasVals[tid] += s_gradBiasVals[tid + y * blockDim.x];
}
__syncthreads();
}
// Write the output only from the first lane.
if (tidy == 0 && idx < outDim) {
if (update) {
// If performing inplace update, subtract from bias.
Ty *bias = buffer;
bias[idx] = (bias[idx] - s_gradBiasVals[tid]);
} else {
// If just accumulating gradients, write to gradBias.
Ty *gradBias = buffer;
gradBias[idx] = s_gradBiasVals[tid];
}
}
}
// Use gradWeight from accGradWeight to update the weight.
// This kernel is launched batchSize number of times.
// At each step in the iteration, the weights are updated in a sparse manner.
template<typename Ty>
__global__ static
void updateWeight(
Ty *weight,
const Ty *gradWeight,
const int64_t *keys,
const int64_t *cumSumSizes,
const int64_t outDim,
const int64_t gradWeightStride,
const int64_t weightStride,
const int64_t keysOffset,
const Ty learningRate,
const Ty weightDecay,
const int maxNormalize,
const int64_t batchId)
{
int64_t gidx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t gidy = blockIdx.y * blockDim.y + threadIdx.y;
// Find the limits of the work to be done
const int64_t batchStart = batchId == 0 ? 0 : cumSumSizes[batchId - 1];
const int64_t batchEnd = cumSumSizes[batchId];
// When maxNormalize is turned on, the weight tensor will contain
// an extra "maxNormalize" number of terms per output at the beginning.
// When maxNormalize is false, both will evaluate to the same pointer.
// when maxNormalize is true,
// - nWeight[2] will contain the individual scaling factor.
// - nWeight[3] will contain the individual bias for the normalized input.
Ty *nWeight = weight;
weight += maxNormalize + gidx;
// When maxNormalize is enabled, gradWeight will be twice the size.
// The first half will contain the gradients required for maxNormalization.
// The second half will contain the gradients required for updating weights.
// if maxNormalize is false, both will evaluate to the same pointer.
const Ty *gradWeight0 = gradWeight + gidx;
const Ty *gradWeight1 = gradWeight0 + (maxNormalize ? outDim : 0);
if (gidx >= outDim) return;
for (int64_t id = batchStart + gidy; id < batchEnd; id += blockDim.y * gridDim.y) {
Ty lr = learningRate;
Ty wd = weightDecay;
int64_t weightOffset = (keys[id] + keysOffset) * weightStride;
Ty weightVal = weight[weightOffset];
if (maxNormalize) {
Ty scale = nWeight[weightOffset + 2];
lr *= scale;
wd *= scale;
// nWeight[3] needs to be updated in the following manner for a given input.
// nWeight[3] = nWeight[3] - sum(gradWeight0[gidx] * weight[gidx]);
// Since problem is parallelized along gidx, use atomicAdd for the update.
Ty gradNormBias = lr * weightVal * gradWeight0[id * gradWeightStride];
atomicAdd(nWeight + weightOffset + 3, -gradNormBias);
}
// Perform the regular update
Ty gradWeightVal = lr * gradWeight1[id * gradWeightStride];
if (weightDecay == 0) {
weight[weightOffset] = weightVal - gradWeightVal;
} else {
weight[weightOffset] = weightVal * (1 - wd) - gradWeightVal;
}
}
}
// This kernel is launched batchSize number of times.
// At each step in the iteration, the weights are updated in place in a sparse manner.
template<typename Ty>
__global__ static
void accUpdateWeight(
Ty *weight,
const int64_t weightStride,
const Ty *gradOutput,
const int64_t outDim,
const Ty *values,
const int64_t *cumSumSizes,
const int64_t *keys,
const int64_t keysOffset,
const Ty scale,
const Ty weightDecay,
const int maxNormalize,
const int64_t batchId)
{
// Parallel along outDim.
int64_t gidx = blockIdx.x * blockDim.x + threadIdx.x;
// Parallel along the sparse input size for current batch.
int64_t gidy = blockIdx.y * blockDim.y + threadIdx.y;
if (gidx >= outDim) return;
// Find the limits of the work to be done.
const int64_t batchStart = batchId == 0 ? 0 : cumSumSizes[batchId - 1];
const int64_t batchEnd = cumSumSizes[batchId];
gradOutput += batchId * outDim;
Ty gradOutVal = scale * (gidx < outDim ? gradOutput[gidx] : 0);
// When maxNormalize is turned on, the weight tensor will contain
// an extra "maxNormalize" number of terms per output at the beginning.
// When maxNormalize is false, both will evaluate to the same pointer.
// when maxNormalize is true,
// - nWeight[2] will contain the individual scaling factor.
// - nWeight[3] will contain the individual bias for the normalized input.
Ty *nWeight = weight;
weight += maxNormalize + gidx;
for (int64_t id = batchStart + gidy; id < batchEnd; id += blockDim.y * gridDim.y) {
Ty wd = weightDecay;
int64_t weightOffset = (keys[id] + keysOffset) * weightStride;
Ty gradWeightVal = gradOutVal * values[id];
Ty weightVal = weight[weightOffset];
if (maxNormalize) {
Ty nScale = nWeight[weightOffset + 2];
gradWeightVal *= nScale;
wd *= nScale;
// nWeight[3] needs to be updated in the following manner for a given input.
// nWeight[3] = nWeight[3] - sum(gradOut[gidx] * weight[gidx]);
// Since problem is parallelized along gidx, use atomicAdd for the update.
Ty gradNormBias = nScale * weightVal * gradOutVal;
atomicAdd(nWeight + weightOffset + 3, -gradNormBias);
}
// Perform the regular update
if (weightDecay == 0) {
weight[weightOffset] = weightVal - gradWeightVal;
} else {
weight[weightOffset] = weightVal * (1 - wd) - gradWeightVal;
}
}
}
#ifdef CUDA_HALF_TENSOR
void THNN_CudaHalfIndexLinear_updateOutput(
THCState *state,
THCudaLongTensor *keys,
int64_t keysOffset,
THCudaHalfTensor *values,
THCudaLongTensor *sizes,
THCudaLongTensor *cumSumSizes,
THCudaHalfTensor *output,
THCudaHalfTensor *weight,
THCudaHalfTensor *bias,
THCudaHalfTensor *normalizedValues,
int train) {
THError("THCudaHalfTensor not supported with IndexLinear");
}
void THNN_CudaHalfIndexLinear_accGradParameters(
THCState *state,
THCudaLongTensor *keys,
int64_t keysOffset,
THCudaHalfTensor *values,
THCudaLongTensor *sizes,
THCudaLongTensor *cumSumSizes,
THCudaHalfTensor *gradOutput,
THCudaHalfTensor *gradWeight,
THCudaHalfTensor *gradBias,
THCudaHalfTensor *weight,
THCudaHalfTensor *bias,
THCudaHalfTensor* valuesBuffer,
float weightDecay,
float scale) {
THError("THCudaHalfTensor not supported with IndexLinear");
}
void THNN_CudaHalfIndexLinear_accUpdateGradParameters(
THCState *state,
THCudaLongTensor *keys,
int64_t keysOffset,
THCudaHalfTensor *values,
THCudaLongTensor *sizes,
THCudaLongTensor *cumSumSizes,
THCudaHalfTensor *gradOutput,
THCudaHalfTensor *weight,
THCudaHalfTensor *bias,
float weightDecay,
float scale) {
THError("THCudaHalfTensor not supported with IndexLinear");
}
void THNN_CudaHalfIndexLinear_updateParameters(
THCState *state,
THCudaHalfTensor *gradWeight,
THCudaHalfTensor *gradBias,
THCudaHalfTensor *weight,
THCudaHalfTensor *bias,
THCudaLongTensor *runningKeys,
THCudaLongTensor *cumSumSizes,
int64_t keysOffset,
float weightDecay,
float learningRate) {
THError("THCudaHalfTensor not supported with IndexLinear");
}
#endif
#include "generic/IndexLinear.cu"
#include "THCGenerateFloatType.h"
#include "generic/IndexLinear.cu"
#include "THCGenerateDoubleType.h"
|
f75340d708ca6560a35ed54a2f8a25c30c97586a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::concat(int n, const Tensor* tensors,
int axis)
{
Concat *cat = new Concat(*this, n, tensors, axis);
layers.push_back(cat);
return cat->outputs[0];
}
Concat::Concat(FFModel& model,
int _n, const Tensor* _tensors,
int _axis)
: Op(model, OP_CONCAT, "Concat_"+std::to_string(_axis), _n, _tensors), axis(_axis),
profiling(model.config.profiling)
{
//TODO: switch to use the Legion dim ordering
int num_dim = inputs[0].numDim;
outputs[0].numDim = num_dim;
for (int i = 0; i < num_dim; i++)
outputs[0].adim[i] = inputs[0].adim[i];
for (int i = 1; i < numInputs; i++)
for (int j = 0; j < num_dim; j++) {
if (j != num_dim - 1 - axis)
assert(inputs[i].adim[j] == outputs[0].adim[j]);
else
outputs[0].adim[j] += inputs[i].adim[j];
}
numOutputs = 1;
numWeights = 0;
}
void Concat::create_weights(FFModel& model)
{
// DO nothing
}
void Concat::create_output_and_partition(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = model.get_or_create_task_is(inputs[0].numDim, pcname);
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
int dims[MAX_TENSOR_DIM], num_dim = inputs[0].numDim;
assert(num_dim == domain.get_dim());
for (int i = 0; i < num_dim; i++)
dims[i] = inputs[0].adim[num_dim-1-i];
for (int i = 1; i < numInputs; i++)
for (int j = 0; j < num_dim; j++) {
if (j != axis)
assert(inputs[i].adim[num_dim-1-j] == dims[j]);
else
dims[j] += inputs[i].adim[num_dim-1-j];
}
//for (int i = 0; i < num_dim; i++)
//printf("concat: dim[%d] = %d\n", i, dims[i]);
switch (domain.get_dim()) {
case 1:
{
Rect<1> part_rect = domain;
outputs[0] = model.create_tensor<1>(dims, IndexSpaceT<1>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<1> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<1>(inputs[i],
IndexSpaceT<1>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
case 2:
{
Rect<2> part_rect = domain;
outputs[0] = model.create_tensor<2>(dims, IndexSpaceT<2>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<2> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<2>(inputs[i],
IndexSpaceT<2>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
case 3:
{
Rect<3> part_rect = domain;
outputs[0] = model.create_tensor<3>(dims, IndexSpaceT<3>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<3> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<3>(inputs[i],
IndexSpaceT<3>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
case 4:
{
Rect<4> part_rect = domain;
outputs[0] = model.create_tensor<4>(dims, IndexSpaceT<4>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<4> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<4>(inputs[i],
IndexSpaceT<4>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
#if MAX_TENSOR_DIM >= 5
case 5:
{
Rect<5> part_rect = domain;
outputs[0] = model.create_tensor<5>(dims, IndexSpaceT<5>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<5> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<5>(inputs[i],
IndexSpaceT<5>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
#endif
default:
{
fprintf(stderr, "Unsupported concat dimension number");
assert(false);
}
}
}
__host__
OpMeta* Concat::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
//FFHandler handler = *((const FFHandler*) task->local_args);
//ConcatMeta* m = new ConcatMeta(handler);
//return m;
// Return null since Concat ops don't need ConcatMeta
return NULL;
}
void Concat::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(CONCAT_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Concat)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(0, FID_DATA);
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[i].region));
launcher.add_field(i + 1, FID_DATA);
}
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
}
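// Helper for the concat kernels: given an N-D rectangle and the concat axis,
// blk_size becomes the product of the extents of dimensions [0, axis] and
// num_blocks the product of the remaining (outer) dimensions.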
template<int N>
void calc_blk_size(coord_t& num_blocks,
coord_t& blk_size,
Rect<N> rect,
int axis)
{
num_blocks = 1;
blk_size = 1;
for (int d = 0; d < N; d++) {
if (d <= axis)
blk_size *= (rect.hi[d] - rect.lo[d] + 1);
else
num_blocks *= (rect.hi[d] - rect.lo[d] + 1);
}
}
/*
regions[0](O): output
regions[1..numInputs](I): inputs
*/
void Concat::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const Concat* cc = (Concat*) task->args;
// Note that our internal axis index ordering is opposite to other frameworks
int axis = cc->outputs[0].numDim - 1 - cc->axis;
assert(regions.size() == cc->numInputs + 1);
assert(task->regions.size() == cc->numInputs + 1);
float *output;
const float *inputs[MAX_NUM_INPUTS];
coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS];
assert(cc->numInputs <= MAX_NUM_INPUTS);
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
assert(domain.get_dim() == cc->outputs[0].numDim);
switch (domain.get_dim()) {
case 1:
{
TensorAccessorW<float, 1> accOutput(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
output = accOutput.ptr;
calc_blk_size<1>(num_blocks, output_blk_size, accOutput.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorR<float, 1> accInput(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
inputs[i] = accInput.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<1>(input_num_blocks, input_blk_sizes[i], accInput.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 2:
{
TensorAccessorW<float, 2> accOutput(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
output = accOutput.ptr;
calc_blk_size<2>(num_blocks, output_blk_size, accOutput.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorR<float, 2> accInput(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
inputs[i] = accInput.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<2>(input_num_blocks, input_blk_sizes[i], accInput.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 3:
{
TensorAccessorW<float, 3> accOutput(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
output = accOutput.ptr;
calc_blk_size<3>(num_blocks, output_blk_size, accOutput.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorR<float, 3> accInput(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
inputs[i] = accInput.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<3>(input_num_blocks, input_blk_sizes[i], accInput.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 4:
{
TensorAccessorW<float, 4> accOutput(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
output = accOutput.ptr;
calc_blk_size<4>(num_blocks, output_blk_size, accOutput.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorR<float, 4> accInput(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
inputs[i] = accInput.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<4>(input_num_blocks, input_blk_sizes[i], accInput.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
default:
fprintf(stderr, "Unsupported concat dimension number");
assert(false);
}
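// Copy each input into its slice of the output: input i fills input_blk_sizes[i]
// contiguous elements inside each of the num_blocks output blocks, after which
// the output pointer is advanced past that slice.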
for (int i = 0; i < cc->numInputs; i++) {
hipLaunchKernelGGL(( copy_with_stride), dim3(GET_BLOCKS(input_blk_sizes[i]*num_blocks)), dim3(CUDA_NUM_THREADS), 0, 0,
output, inputs[i], num_blocks, output_blk_size, input_blk_sizes[i]);
//printf("output = %x num_blocks=%d output_blk_size=%d input_blk_size[%d]=%d\n",
// output, num_blocks, output_blk_size, i, input_blk_sizes[i]);
output += input_blk_sizes[i];
}
checkCUDA(hipDeviceSynchronize());
if (cc->profiling) {
//print_tensor<4, float>(output - output_blk_size, output_rect, "[Concat:forward:output]");
printf("output_blk_size=%zu\n", output_blk_size);
//print_tensor<4, float>(inputs[0], input_rect[0], "[Concat:forward:input0]");
//print_tensor<4, float>(inputs[1], input_rect[1], "[Concat:forward:input1]");
}
#ifdef DEADCODE
const AccessorWO<float, 3> acc_output(regions[0], FID_DATA);
Rect<3> rect_output;
rect_output =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
assert(acc_output.accessor.is_dense_arbitrary(rect_output));
float *output_ptr = acc_output.ptr(rect_output.lo);
float *output_bound = output_ptr + rect_output.volume();
for (int i = 0; i < cc->numInputs; i++) {
const AccessorRO<float, 3> acc_input(regions[i+1], FID_DATA);
Rect<3> rect_input =
runtime->get_index_space_domain(ctx, task->regions[i+1].region.get_index_space());
assert(acc_input.accessor.is_dense_arbitrary(rect_input));
const float *input_ptr = acc_input.ptr(rect_input.lo);
checkCUDA(hipMemcpyAsync(output_ptr, input_ptr,
rect_input.volume() * sizeof(float),
hipMemcpyDeviceToDevice));
output_ptr += rect_input.volume();
}
assert(output_ptr == output_bound);
#endif
}
void Concat::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
#ifdef DEADCODE
Rect<3> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
#endif
IndexLauncher launcher(CONCAT_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Concat)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(0, FID_DATA);
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[i].region));
launcher.add_field(i + 1, FID_DATA);
}
runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I): output_grad
regions[1..numInputs](I/O): input_grad
*/
void Concat::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const Concat* cc = (Concat*) task->args;
// Note that our internal axis index ordering is opposite to other frameworks
int axis = cc->outputs[0].numDim - 1 - cc->axis;
assert(regions.size() == cc->numInputs + 1);
assert(task->regions.size() == cc->numInputs + 1);
const float *output_grad;
float *input_grads[MAX_NUM_INPUTS];
coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS];
assert(cc->numInputs <= MAX_NUM_INPUTS);
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
assert(domain.get_dim() == cc->outputs[0].numDim);
switch (domain.get_dim()) {
case 1:
{
TensorAccessorR<float, 1> accOutputGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
output_grad = accOutputGrad.ptr;
calc_blk_size<1>(num_blocks, output_blk_size, accOutputGrad.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorW<float, 1> accInputGrad(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime,
true/*readOutput*/);
input_grads[i] = accInputGrad.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<1>(input_num_blocks, input_blk_sizes[i], accInputGrad.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 2:
{
TensorAccessorR<float, 2> accOutputGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
output_grad = accOutputGrad.ptr;
calc_blk_size<2>(num_blocks, output_blk_size, accOutputGrad.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorW<float, 2> accInputGrad(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime,
true/*readOutput*/);
input_grads[i] = accInputGrad.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<2>(input_num_blocks, input_blk_sizes[i], accInputGrad.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 3:
{
TensorAccessorR<float, 3> accOutputGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
output_grad = accOutputGrad.ptr;
calc_blk_size<3>(num_blocks, output_blk_size, accOutputGrad.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorW<float, 3> accInputGrad(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime,
true/*readOutput*/);
input_grads[i] = accInputGrad.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<3>(input_num_blocks, input_blk_sizes[i], accInputGrad.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 4:
{
TensorAccessorR<float, 4> accOutputGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
output_grad = accOutputGrad.ptr;
calc_blk_size<4>(num_blocks, output_blk_size, accOutputGrad.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorW<float, 4> accInputGrad(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime,
true/*readOutput*/);
input_grads[i] = accInputGrad.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<4>(input_num_blocks, input_blk_sizes[i], accInputGrad.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
default:
fprintf(stderr, "Unsupported concat dimension number");
assert(false);
}
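// Accumulate each input's slice of the output gradient back into that input's
// own gradient buffer, advancing the output-gradient pointer past the slice.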
for (int i = 0; i < cc->numInputs; i++) {
hipLaunchKernelGGL(( add_with_stride), dim3(GET_BLOCKS(input_blk_sizes[i]*num_blocks)), dim3(CUDA_NUM_THREADS), 0, 0,
input_grads[i], output_grad, num_blocks, input_blk_sizes[i], output_blk_size);
output_grad += input_blk_sizes[i];
}
checkCUDA(hipDeviceSynchronize());
if (cc->profiling) {
int batch_size = domain.get_volume() / output_blk_size;
Rect<2> output_rect(Point<2>(0, 0), Point<2>(output_blk_size-1, batch_size - 1));
Rect<2> input_rect(Point<2>(0, 0), Point<2>(input_blk_sizes[0]-1, batch_size - 1));
//print_tensor<2, float>(output_grad - output_blk_size, output_rect, "[Concat:backward:output]");
//print_tensor<2, float>(input_grads[0], input_rect, "[Concat:backward:input0]");
}
#ifdef DEADCODE
const AccessorRO<float, 3> acc_output(regions[0], FID_DATA);
Rect<3> rect_output;
rect_output =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
assert(acc_output.accessor.is_dense_arbitrary(rect_output));
float *output_ptr = (float*) acc_output.ptr(rect_output.lo);
float *output_bound = output_ptr + rect_output.volume();
for (int i = 0; i < cc->numInputs; i++) {
const AccessorWO<float, 3> acc_input(regions[i+1], FID_DATA);
Rect<3> rect_input =
runtime->get_index_space_domain(ctx, task->regions[i+1].region.get_index_space());
assert(acc_input.accessor.is_dense_arbitrary(rect_input));
float *input_ptr = acc_input.ptr(rect_input.lo);
checkCUDA(hipMemcpyAsync(input_ptr, output_ptr,
rect_input.volume() * sizeof(float),
hipMemcpyDeviceToDevice));
output_ptr += rect_input.volume();
}
assert(output_ptr == output_bound);
#endif
}
void Concat::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
#ifdef DEADCODE
Rect<3> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
#endif
IndexLauncher launcher(CONCAT_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Concat)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(0, FID_DATA);
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[i], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[i].region_grad));
//LogicalRegion lr = inputs[i].region_grad;
//printf("concat[%d]: region(%d,%d,%d)\n", i+1, lr.get_index_space().get_id(), lr.get_field_space().get_id(), lr.get_tree_id());
launcher.add_field(i + 1, FID_DATA);
}
runtime->execute_index_space(ctx, launcher);
}
bool Concat::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
//TODO: implement measure_forward
return false;
}
| f75340d708ca6560a35ed54a2f8a25c30c97586a.cu | /* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::concat(int n, const Tensor* tensors,
int axis)
{
Concat *cat = new Concat(*this, n, tensors, axis);
layers.push_back(cat);
return cat->outputs[0];
}
Concat::Concat(FFModel& model,
int _n, const Tensor* _tensors,
int _axis)
: Op(model, OP_CONCAT, "Concat_"+std::to_string(_axis), _n, _tensors), axis(_axis),
profiling(model.config.profiling)
{
//TODO: switch to use the Legion dim ordering
int num_dim = inputs[0].numDim;
outputs[0].numDim = num_dim;
for (int i = 0; i < num_dim; i++)
outputs[0].adim[i] = inputs[0].adim[i];
for (int i = 1; i < numInputs; i++)
for (int j = 0; j < num_dim; j++) {
if (j != num_dim - 1 - axis)
assert(inputs[i].adim[j] == outputs[0].adim[j]);
else
outputs[0].adim[j] += inputs[i].adim[j];
}
numOutputs = 1;
numWeights = 0;
}
void Concat::create_weights(FFModel& model)
{
// DO nothing
}
void Concat::create_output_and_partition(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = model.get_or_create_task_is(inputs[0].numDim, pcname);
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
int dims[MAX_TENSOR_DIM], num_dim = inputs[0].numDim;
assert(num_dim == domain.get_dim());
for (int i = 0; i < num_dim; i++)
dims[i] = inputs[0].adim[num_dim-1-i];
for (int i = 1; i < numInputs; i++)
for (int j = 0; j < num_dim; j++) {
if (j != axis)
assert(inputs[i].adim[num_dim-1-j] == dims[j]);
else
dims[j] += inputs[i].adim[num_dim-1-j];
}
//for (int i = 0; i < num_dim; i++)
//printf("concat: dim[%d] = %d\n", i, dims[i]);
switch (domain.get_dim()) {
case 1:
{
Rect<1> part_rect = domain;
outputs[0] = model.create_tensor<1>(dims, IndexSpaceT<1>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<1> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<1>(inputs[i],
IndexSpaceT<1>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
case 2:
{
Rect<2> part_rect = domain;
outputs[0] = model.create_tensor<2>(dims, IndexSpaceT<2>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<2> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<2>(inputs[i],
IndexSpaceT<2>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
case 3:
{
Rect<3> part_rect = domain;
outputs[0] = model.create_tensor<3>(dims, IndexSpaceT<3>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<3> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<3>(inputs[i],
IndexSpaceT<3>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
case 4:
{
Rect<4> part_rect = domain;
outputs[0] = model.create_tensor<4>(dims, IndexSpaceT<4>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<4> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<4>(inputs[i],
IndexSpaceT<4>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
#if MAX_TENSOR_DIM >= 5
case 5:
{
Rect<5> part_rect = domain;
outputs[0] = model.create_tensor<5>(dims, IndexSpaceT<5>(task_is), DT_FLOAT);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < numInputs; i++) {
Rect<5> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<5>(inputs[i],
IndexSpaceT<5>(task_is), input_lps[i], input_grad_lps[i]);
}
}
break;
}
#endif
default:
{
fprintf(stderr, "Unsupported concat dimension number");
assert(false);
}
}
}
__host__
OpMeta* Concat::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
//FFHandler handler = *((const FFHandler*) task->local_args);
//ConcatMeta* m = new ConcatMeta(handler);
//return m;
// Return null since Concat ops don't need ConcatMeta
return NULL;
}
void Concat::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(CONCAT_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Concat)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(0, FID_DATA);
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[i].region));
launcher.add_field(i + 1, FID_DATA);
}
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
}
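// Helper for the concat kernels: given an N-D rectangle and the concat axis,
// blk_size becomes the product of the extents of dimensions [0, axis] and
// num_blocks the product of the remaining (outer) dimensions.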
template<int N>
void calc_blk_size(coord_t& num_blocks,
coord_t& blk_size,
Rect<N> rect,
int axis)
{
num_blocks = 1;
blk_size = 1;
for (int d = 0; d < N; d++) {
if (d <= axis)
blk_size *= (rect.hi[d] - rect.lo[d] + 1);
else
num_blocks *= (rect.hi[d] - rect.lo[d] + 1);
}
}
/*
regions[0](O): output
regions[1..numInputs](I): inputs
*/
void Concat::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const Concat* cc = (Concat*) task->args;
// Note that our internal axis index ordering is opposite to other frameworks
int axis = cc->outputs[0].numDim - 1 - cc->axis;
assert(regions.size() == cc->numInputs + 1);
assert(task->regions.size() == cc->numInputs + 1);
float *output;
const float *inputs[MAX_NUM_INPUTS];
coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS];
assert(cc->numInputs <= MAX_NUM_INPUTS);
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
assert(domain.get_dim() == cc->outputs[0].numDim);
switch (domain.get_dim()) {
case 1:
{
TensorAccessorW<float, 1> accOutput(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
output = accOutput.ptr;
calc_blk_size<1>(num_blocks, output_blk_size, accOutput.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorR<float, 1> accInput(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
inputs[i] = accInput.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<1>(input_num_blocks, input_blk_sizes[i], accInput.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 2:
{
TensorAccessorW<float, 2> accOutput(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
output = accOutput.ptr;
calc_blk_size<2>(num_blocks, output_blk_size, accOutput.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorR<float, 2> accInput(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
inputs[i] = accInput.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<2>(input_num_blocks, input_blk_sizes[i], accInput.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 3:
{
TensorAccessorW<float, 3> accOutput(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
output = accOutput.ptr;
calc_blk_size<3>(num_blocks, output_blk_size, accOutput.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorR<float, 3> accInput(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
inputs[i] = accInput.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<3>(input_num_blocks, input_blk_sizes[i], accInput.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 4:
{
TensorAccessorW<float, 4> accOutput(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
false/*readOutput*/);
output = accOutput.ptr;
calc_blk_size<4>(num_blocks, output_blk_size, accOutput.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorR<float, 4> accInput(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime);
inputs[i] = accInput.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<4>(input_num_blocks, input_blk_sizes[i], accInput.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
default:
fprintf(stderr, "Unsupported concat dimension number");
assert(false);
}
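// Copy each input into its slice of the output: input i fills input_blk_sizes[i]
// contiguous elements inside each of the num_blocks output blocks, after which
// the output pointer is advanced past that slice.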
for (int i = 0; i < cc->numInputs; i++) {
copy_with_stride<<<GET_BLOCKS(input_blk_sizes[i]*num_blocks), CUDA_NUM_THREADS>>>(
output, inputs[i], num_blocks, output_blk_size, input_blk_sizes[i]);
//printf("output = %x num_blocks=%d output_blk_size=%d input_blk_size[%d]=%d\n",
// output, num_blocks, output_blk_size, i, input_blk_sizes[i]);
output += input_blk_sizes[i];
}
checkCUDA(cudaDeviceSynchronize());
if (cc->profiling) {
//print_tensor<4, float>(output - output_blk_size, output_rect, "[Concat:forward:output]");
printf("output_blk_size=%zu\n", output_blk_size);
//print_tensor<4, float>(inputs[0], input_rect[0], "[Concat:forward:input0]");
//print_tensor<4, float>(inputs[1], input_rect[1], "[Concat:forward:input1]");
}
#ifdef DEADCODE
const AccessorWO<float, 3> acc_output(regions[0], FID_DATA);
Rect<3> rect_output;
rect_output =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
assert(acc_output.accessor.is_dense_arbitrary(rect_output));
float *output_ptr = acc_output.ptr(rect_output.lo);
float *output_bound = output_ptr + rect_output.volume();
for (int i = 0; i < cc->numInputs; i++) {
const AccessorRO<float, 3> acc_input(regions[i+1], FID_DATA);
Rect<3> rect_input =
runtime->get_index_space_domain(ctx, task->regions[i+1].region.get_index_space());
assert(acc_input.accessor.is_dense_arbitrary(rect_input));
const float *input_ptr = acc_input.ptr(rect_input.lo);
checkCUDA(cudaMemcpyAsync(output_ptr, input_ptr,
rect_input.volume() * sizeof(float),
cudaMemcpyDeviceToDevice));
output_ptr += rect_input.volume();
}
assert(output_ptr == output_bound);
#endif
}
void Concat::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
#ifdef DEADCODE
Rect<3> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
#endif
IndexLauncher launcher(CONCAT_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Concat)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(0, FID_DATA);
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_lps[i], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[i].region));
launcher.add_field(i + 1, FID_DATA);
}
runtime->execute_index_space(ctx, launcher);
}
/*
regions[0](I): output_grad
regions[1..numInputs](I/O): input_grad
*/
void Concat::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
const Concat* cc = (Concat*) task->args;
// Note that our internal axis index ordering is opposite to other frameworks
int axis = cc->outputs[0].numDim - 1 - cc->axis;
assert(regions.size() == cc->numInputs + 1);
assert(task->regions.size() == cc->numInputs + 1);
const float *output_grad;
float *input_grads[MAX_NUM_INPUTS];
coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS];
assert(cc->numInputs <= MAX_NUM_INPUTS);
Domain domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
assert(domain.get_dim() == cc->outputs[0].numDim);
switch (domain.get_dim()) {
case 1:
{
TensorAccessorR<float, 1> accOutputGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
output_grad = accOutputGrad.ptr;
calc_blk_size<1>(num_blocks, output_blk_size, accOutputGrad.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorW<float, 1> accInputGrad(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime,
true/*readOutput*/);
input_grads[i] = accInputGrad.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<1>(input_num_blocks, input_blk_sizes[i], accInputGrad.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 2:
{
TensorAccessorR<float, 2> accOutputGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
output_grad = accOutputGrad.ptr;
calc_blk_size<2>(num_blocks, output_blk_size, accOutputGrad.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorW<float, 2> accInputGrad(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime,
true/*readOutput*/);
input_grads[i] = accInputGrad.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<2>(input_num_blocks, input_blk_sizes[i], accInputGrad.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 3:
{
TensorAccessorR<float, 3> accOutputGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
output_grad = accOutputGrad.ptr;
calc_blk_size<3>(num_blocks, output_blk_size, accOutputGrad.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorW<float, 3> accInputGrad(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime,
true/*readOutput*/);
input_grads[i] = accInputGrad.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<3>(input_num_blocks, input_blk_sizes[i], accInputGrad.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
case 4:
{
TensorAccessorR<float, 4> accOutputGrad(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
output_grad = accOutputGrad.ptr;
calc_blk_size<4>(num_blocks, output_blk_size, accOutputGrad.rect, axis);
for (int i = 0; i < cc->numInputs; i++) {
TensorAccessorW<float, 4> accInputGrad(
regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime,
true/*readOutput*/);
input_grads[i] = accInputGrad.ptr;
coord_t input_num_blocks = 1;
calc_blk_size<4>(input_num_blocks, input_blk_sizes[i], accInputGrad.rect, axis);
assert(input_num_blocks == num_blocks);
}
break;
}
default:
fprintf(stderr, "Unsupported concat dimension number");
assert(false);
}
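// Accumulate each input's slice of the output gradient back into that input's
// own gradient buffer, advancing the output-gradient pointer past the slice.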
for (int i = 0; i < cc->numInputs; i++) {
add_with_stride<<<GET_BLOCKS(input_blk_sizes[i]*num_blocks), CUDA_NUM_THREADS>>>(
input_grads[i], output_grad, num_blocks, input_blk_sizes[i], output_blk_size);
output_grad += input_blk_sizes[i];
}
checkCUDA(cudaDeviceSynchronize());
if (cc->profiling) {
int batch_size = domain.get_volume() / output_blk_size;
Rect<2> output_rect(Point<2>(0, 0), Point<2>(output_blk_size-1, batch_size - 1));
Rect<2> input_rect(Point<2>(0, 0), Point<2>(input_blk_sizes[0]-1, batch_size - 1));
//print_tensor<2, float>(output_grad - output_blk_size, output_rect, "[Concat:backward:output]");
//print_tensor<2, float>(input_grads[0], input_rect, "[Concat:backward:input0]");
}
#ifdef DEADCODE
const AccessorRO<float, 3> acc_output(regions[0], FID_DATA);
Rect<3> rect_output;
rect_output =
runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space());
assert(acc_output.accessor.is_dense_arbitrary(rect_output));
float *output_ptr = (float*) acc_output.ptr(rect_output.lo);
float *output_bound = output_ptr + rect_output.volume();
for (int i = 0; i < cc->numInputs; i++) {
const AccessorWO<float, 3> acc_input(regions[i+1], FID_DATA);
Rect<3> rect_input =
runtime->get_index_space_domain(ctx, task->regions[i+1].region.get_index_space());
assert(acc_input.accessor.is_dense_arbitrary(rect_input));
float *input_ptr = acc_input.ptr(rect_input.lo);
checkCUDA(cudaMemcpyAsync(input_ptr, output_ptr,
rect_input.volume() * sizeof(float),
cudaMemcpyDeviceToDevice));
output_ptr += rect_input.volume();
}
assert(output_ptr == output_bound);
#endif
}
void Concat::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
#ifdef DEADCODE
Rect<3> rect = runtime->get_index_space_domain(ctx, task_is);
int idx = 0;
for (PointInRectIterator<3> it(rect); it(); it++) {
OpMeta* mp = meta[idx++];
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*)));
}
#endif
IndexLauncher launcher(CONCAT_BWD_TASK_ID, task_is,
TaskArgument(this, sizeof(Concat)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(0, FID_DATA);
for (int i = 0; i < numInputs; i++) {
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[i], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[i].region_grad));
//LogicalRegion lr = inputs[i].region_grad;
//printf("concat[%d]: region(%d,%d,%d)\n", i+1, lr.get_index_space().get_id(), lr.get_field_space().get_id(), lr.get_tree_id());
launcher.add_field(i + 1, FID_DATA);
}
runtime->execute_index_space(ctx, launcher);
}
bool Concat::measure_compute_time(Simulator* sim,
const ParallelConfig& pc,
float& forward_time,
float& backward_time)
{
//TODO: implement measure_forward
return false;
}
|
c84208a5704682e77b701d49125389fb0d304831.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cu_pooling_max.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
float *loc = NULL;
hipMalloc(&loc, XSIZE*YSIZE);
const int rowssrc = 1;
const int colssrc = 1;
const int rowsdst = 1;
const int colsdst = 1;
const int stridex = 1;
const int stridey = 1;
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
// Warm-up: one initial launch plus ten untimed launches before measuring.
hipLaunchKernelGGL(cu_pooling_max, dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, loc, rowssrc, colssrc, rowsdst, colsdst, stridex, stridey, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(cu_pooling_max, dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, loc, rowssrc, colssrc, rowsdst, colsdst, stridex, stridey, n);
}
// Time 1000 kernel launches.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(cu_pooling_max, dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, loc, rowssrc, colssrc, rowsdst, colsdst, stridex, stridey, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c84208a5704682e77b701d49125389fb0d304831.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cu_pooling_max.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
float *loc = NULL;
cudaMalloc(&loc, XSIZE*YSIZE);
const int rowssrc = 1;
const int colssrc = 1;
const int rowsdst = 1;
const int colsdst = 1;
const int stridex = 1;
const int stridey = 1;
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cu_pooling_max<<<gridBlock,threadBlock>>>(src,dst,loc,rowssrc,colssrc,rowsdst,colsdst,stridex,stridey,n);
cudaDeviceSynchronize();
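// Warm-up launches so the timed loop below measures steady-state kernel time.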
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cu_pooling_max<<<gridBlock,threadBlock>>>(src,dst,loc,rowssrc,colssrc,rowsdst,colsdst,stridex,stridey,n);
}
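// Time 1000 kernel launches.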
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cu_pooling_max<<<gridBlock,threadBlock>>>(src,dst,loc,rowssrc,colssrc,rowsdst,colsdst,stridex,stridey,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
281ec5b95f5a40b95ea351576a723a39d39b3395.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<iostream>
__global__ void simpleKernel(int a, int* dA)
{
//this adds a value to a variable stored in global memory
int x = threadIdx.x;
int y = blockIdx.x;
// printf("x is %d, y is %d, index is %d, num is %d\n",x,8*y+x,a*x+y);
dA[8*y+x] = a*x + y;
}
int main()
{
int hA[16], *dA;
//allocate memory on the device (GPU); zero out all entries in this device array
hipMalloc((void**)&dA, sizeof(int) * 16);
hipMemset(dA, 0, 16 * sizeof(int));
const int RANGE = 10;
int a = rand() % (RANGE + 1);
//invoke GPU kernel, with two blocks of eight threads each
hipLaunchKernelGGL(( simpleKernel), dim3(2),dim3(8), 0, 0, a, dA);
hipDeviceSynchronize();
//bring the result back from the GPU into the hostArray
hipMemcpy(&hA, dA, sizeof(int) * 16, hipMemcpyDeviceToHost);
for (int i = 0; i < 16; i++)
std::cout << hA[i] << " ";
std::cout << "\n";
//release the memory allocated on the GPU
hipFree(dA);
return 0;
}
| 281ec5b95f5a40b95ea351576a723a39d39b3395.cu | #include<cuda.h>
#include<iostream>
__global__ void simpleKernel(int a, int* dA)
{
//this adds a value to a variable stored in global memory
int x = threadIdx.x;
int y = blockIdx.x;
// printf("x is %d, y is %d, index is %d, num is %d\n",x,8*y+x,a*x+y);
dA[8*y+x] = a*x + y;
}
int main()
{
int hA[16], *dA;
//allocate memory on the device (GPU); zero out all entries in this device array
cudaMalloc((void**)&dA, sizeof(int) * 16);
cudaMemset(dA, 0, 16 * sizeof(int));
const int RANGE = 10;
int a = rand() % (RANGE + 1);
//invoke GPU kernel, with two blocks of eight threads each
simpleKernel<<<2,8>>>(a, dA);
cudaDeviceSynchronize();
//bring the result back from the GPU into the hostArray
cudaMemcpy(&hA, dA, sizeof(int) * 16, cudaMemcpyDeviceToHost);
for (int i = 0; i < 16; i++)
std::cout << hA[i] << " ";
std::cout << "\n";
//release the memory allocated on the GPU
cudaFree(dA);
return 0;
}
|
d8b8d7530dea2bddda8f151ccff01826ff8c939b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// Description: implementations of elementwise_add_op according to ABY3 protocol
#include "paddle/fluid/platform/transform.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/math/functors.h"
#include "core/paddlefl_mpc/mpc_protocol/context_holder.h"
#include "core/paddlefl_mpc/mpc_protocol/aby3_operators_impl/common.cu.h"
// #include "core/paddlefl_mpc/operators/math/elementwise_op_function.h"
#include "core/paddlefl_mpc/mpc_protocol/aby3_operators_impl/elementwise_op.h"
namespace paddle {
namespace operators {
namespace aby3 {
using paddle::framework::Tensor;
using namespace paddle::operators::math;
using CUDADeviceContext = paddle::platform::CUDADeviceContext;
using ::aby3::ABY3Context;
using paddle::mpc::ContextHolder;
void add(const Tensor *lhs, const Tensor *rhs, Tensor *out, int axis) {
PADDLE_ENFORCE(lhs->dims()[0] == 2 && rhs->dims()[0] == 2,
"The first dimension of input x of protocol ABY3 should be equal to 2.");
if (lhs->dims() == rhs->dims()) {
auto lhs_tuple = from_tensor(lhs);
auto rhs_tuple = from_tensor(rhs);
auto out_tuple = from_tensor(out);
auto lhs_ = std::get<0>(lhs_tuple).get();
auto rhs_ = std::get<0>(rhs_tuple).get();
auto out_ = std::get<0>(out_tuple).get();
lhs_->add(rhs_, out_);
} else {
Tensor in_x_t_slice;
Tensor in_y_t_slice;
Tensor out_t_slice;
for (size_t i = 0; i < SHARE_NUM; ++i) {
in_x_t_slice = lhs->Slice(i, i + 1);
in_y_t_slice = rhs->Slice(i, i + 1);
out_t_slice = out->Slice(i, i + 1);
auto x_dims = in_x_t_slice.dims();
auto y_dims = in_y_t_slice.dims();
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
"Axis should be in range [0, x_dims)");
int pre=0, n=0, post=0;
GetMidDims get_mid_dims;
get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post);
auto x_ = in_x_t_slice.data<int64_t>();
auto y_ = in_y_t_slice.data<int64_t>();
auto out_ = out_t_slice.data<int64_t>();
auto nx_ = in_x_t_slice.numel();
paddle::platform::Transform<CUDADeviceContext> trans;
auto gpu_device_ctx = dynamic_cast<const CUDADeviceContext*>(
ContextHolder::device_ctx());
if (post == 1) {
trans(*gpu_device_ctx, x_, x_ + nx_,
paddle::operators::RowwiseTransformIterator<int64_t, CUDADeviceContext>(y_, n),
out_, paddle::operators::math::AddFunctor<int64_t>());
} else {
trans(*gpu_device_ctx, x_, x_ + nx_,
paddle::operators::MidWiseTransformIterator<int64_t, CUDADeviceContext>(y_, n, post),
out_, paddle::operators::math::AddFunctor<int64_t>());
}
}
}
}
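// Backward helper for the broadcast add: each dy entry (per share and per
// broadcast dimension n) accumulates dout over the pre and post dimensions it
// was expanded across in the forward pass.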
template <typename T>
__global__ void add_dy(T* dy, const T* dout, size_t pre, size_t post, size_t n) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < SHARE_NUM * n) {
int i = col / n;
int k = col % n;
T dy_ = 0;
for (int j = 0; j < pre; ++j) {
for (int m = 0; m < post; ++m) {
int out_offset = i * pre * n * post + j * n * post + k * post + m;
dy_ += dout[out_offset];
}
}
dy[col] = dy_;
col += blockDim.x * gridDim.x;
}
}
template <typename T>
__global__ void copy_dx(T* dx, const T* dout, size_t size) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
dx[col] = dout[col];
col += blockDim.x * gridDim.x;
}
}
template <typename T>
__global__ void set_zero(T* data, size_t size) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
data[col] = 0;
col += blockDim.x * gridDim.x;
}
}
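// Gradient of the (possibly broadcast) add: dx is a copy of dout, while dy is
// either a copy (same shapes) or dout reduced over the broadcast dimensions.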
void add_grad(const Tensor *in_x_t,
const Tensor *in_y_t,
const Tensor *dout,
Tensor *dx, Tensor *dy,
int axis) {
auto ctx = ContextHolder::exec_ctx();
auto dout_data = dout->data<int64_t>();
if (dx) {
auto dx_data = dx->mutable_data<int64_t>(ctx->GetPlace());
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size = dim3((dout->numel() + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( copy_dx<int64_t>), dim3(grid_size), dim3(block_size), 0, stream, dx_data, dout_data, dout->numel());
}
if (dy) {
auto dy_data = dy->mutable_data<int64_t>(ctx->GetPlace());
if (in_x_t->dims().size() == in_y_t->dims().size()) {
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size =
dim3((dout->numel() + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( copy_dx<int64_t>), dim3(grid_size), dim3(block_size), 0, stream, dy_data, dout_data, dout->numel());
} else {
auto x_dims = in_x_t->dims();
auto y_dims = in_y_t->dims();
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
"Axis should be in range [0, x_dims)");
int pre=0, n=0, post=0;
GetMidDims get_mid_dims;
get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post);
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size =
dim3((SHARE_NUM * n + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( add_dy<int64_t>), dim3(grid_size), dim3(block_size), 0, stream,
dy_data, dout_data, pre, post, n);
}
}
}
void sub(const Tensor *lhs, const Tensor *rhs, Tensor *out) {
auto lhs_tuple = from_tensor(lhs);
auto rhs_tuple = from_tensor(rhs);
auto out_tuple = from_tensor(out);
auto lhs_ = std::get<0>(lhs_tuple).get();
auto rhs_ = std::get<0>(rhs_tuple).get();
auto out_ = std::get<0>(out_tuple).get();
lhs_->sub(rhs_, out_);
}
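// Broadcast helper: zero-fill y_expand_t with shape expand_dims, then add the
// row-/mid-wise transformed shares of in_y_t into it, share by share.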
template <typename DeviceContext, typename T>
void Expand(const framework::Tensor* in_y_t,
int axis, Tensor* y_expand_t,
const framework::DDim &expand_dims, const framework::ExecutionContext *ctx) {
T* y_expand_data = y_expand_t->mutable_data<T>(expand_dims, ctx->GetPlace());
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size =
dim3((y_expand_t->numel() + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( set_zero<int64_t>), dim3(grid_size), dim3(block_size), 0, stream, y_expand_data, y_expand_t->numel());
Tensor in_y_t_slice;
Tensor y_expand_t_slice;
for (size_t i = 0; i < SHARE_NUM; ++i) {
y_expand_t_slice = y_expand_t->Slice(i, i + 1);
in_y_t_slice = in_y_t->Slice(i, i + 1);
auto y_expand_dims = y_expand_t_slice.dims();
auto y_dims = in_y_t_slice.dims();
axis = (axis == -1 ? y_expand_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < y_expand_dims.size(),
"Axis should be in range [0, x_dims)");
int pre, n, post;
GetMidDims get_mid_dims;
get_mid_dims(y_expand_dims, y_dims, axis, &pre, &n, &post);
auto y_expand_ = y_expand_t_slice.data<T>();
auto y_ = in_y_t_slice.data<T>();
auto nx_ = y_expand_t_slice.numel();
paddle::platform::Transform<DeviceContext> trans;
if (post == 1) {
trans(ctx->template device_context<DeviceContext>(), y_expand_, y_expand_ + nx_,
paddle::operators::RowwiseTransformIterator<T, DeviceContext>(y_, n),
y_expand_, paddle::operators::math::AddFunctor<T>());
} else {
trans(ctx->template device_context<DeviceContext>(), y_expand_, y_expand_ + nx_,
paddle::operators::MidWiseTransformIterator<T, DeviceContext>(y_, n, post),
y_expand_, paddle::operators::math::AddFunctor<T>());
}
}
}
void elementwise_mul(const Tensor *in_x_t, const Tensor *in_y_t, Tensor *out_t, int axis) {
auto ctx = ContextHolder::exec_ctx();
if (in_x_t->dims() == in_y_t->dims()) {
elementwise_mul_with_same_dim(in_x_t, in_y_t, out_t);
} else {
Tensor y_expand_t;
//expand input in_y_t into y_expand_t (dims: in_x_t->dims)
Expand<CUDADeviceContext, int64_t>(in_y_t, axis, &y_expand_t, in_x_t->dims(), ctx);
elementwise_mul_with_same_dim(in_x_t, &y_expand_t, out_t);
}
}
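// Gradient of the (possibly broadcasted) elementwise multiply:
// dx = dout * broadcast(y); dy = dout * x reduced over the pre/post
// broadcast dimensions (via add_dy).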
void elementwise_mul_grad(const Tensor *in_x_t,
const Tensor *in_y_t,
const Tensor *dout,
Tensor *dx, Tensor *dy, int axis) {
auto ctx = ContextHolder::exec_ctx();
if (dx) {
// dx = dout * y_expand
auto dx_data = dx->mutable_data<int64_t>(ctx->GetPlace());
Tensor y_expand_t;
// expand in_y_t into y_expand_t (in_x_t->dims)
Expand<CUDADeviceContext, int64_t>(in_y_t, axis, &y_expand_t, in_x_t->dims(), ctx);
elementwise_mul_with_same_dim(dout, &y_expand_t, dx);
}
if (dy) {
// dy_expand = dout * x
// dy = reduce(dy_expand)
auto dy_data = dy->mutable_data<int64_t>(ctx->GetPlace());
Tensor dy_expand_t;
int64_t* dy_expand_t_data =
dy_expand_t.mutable_data<int64_t>(in_x_t->dims(), ctx->GetPlace());
elementwise_mul_with_same_dim(dout, in_x_t, &dy_expand_t);
// reduce: dy_expand_t (dims: in_x_t->dims()) -> dy (dims: in_y_t->dims())
auto x_dims = in_x_t->dims();
auto y_dims = in_y_t->dims();
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
"Axis should be in range [0, x_dims)");
int pre = 0, n = 0, post = 0;
GetMidDims get_mid_dims;
get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post);
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size =
dim3((SHARE_NUM * n + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
hipLaunchKernelGGL(( add_dy<int64_t>), dim3(grid_size), dim3(block_size), 0, stream, dy_data, dy_expand_t_data, pre, post, n);
}
}
void elementwise_mul_with_same_dim(const Tensor *lhs, const Tensor *rhs, Tensor *out) {
auto lhs_tuple = from_tensor(lhs);
auto rhs_tuple = from_tensor(rhs);
auto out_tuple = from_tensor(out);
auto lhs_ = std::get<0>(lhs_tuple).get();
auto rhs_ = std::get<0>(rhs_tuple).get();
auto out_ = std::get<0>(out_tuple).get();
lhs_->mul(rhs_, out_);
}
} // aby3
} // operators
} // paddle
| d8b8d7530dea2bddda8f151ccff01826ff8c939b.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// Description: implementations of elementwise_add_op according to ABY3 protocol
#include "paddle/fluid/platform/transform.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/math/functors.h"
#include "core/paddlefl_mpc/mpc_protocol/context_holder.h"
#include "core/paddlefl_mpc/mpc_protocol/aby3_operators_impl/common.cu.h"
// #include "core/paddlefl_mpc/operators/math/elementwise_op_function.h"
#include "core/paddlefl_mpc/mpc_protocol/aby3_operators_impl/elementwise_op.h"
namespace paddle {
namespace operators {
namespace aby3 {
using paddle::framework::Tensor;
using namespace paddle::operators::math;
using CUDADeviceContext = paddle::platform::CUDADeviceContext;
using ::aby3::ABY3Context;
using paddle::mpc::ContextHolder;
void add(const Tensor *lhs, const Tensor *rhs, Tensor *out, int axis) {
PADDLE_ENFORCE(lhs->dims()[0] == 2 && rhs->dims()[0] == 2,
"The first dimension of input x of protocol ABY3 should be equal to 2.");
if (lhs->dims() == rhs->dims()) {
auto lhs_tuple = from_tensor(lhs);
auto rhs_tuple = from_tensor(rhs);
auto out_tuple = from_tensor(out);
auto lhs_ = std::get<0>(lhs_tuple).get();
auto rhs_ = std::get<0>(rhs_tuple).get();
auto out_ = std::get<0>(out_tuple).get();
lhs_->add(rhs_, out_);
} else {
Tensor in_x_t_slice;
Tensor in_y_t_slice;
Tensor out_t_slice;
for (size_t i = 0; i < SHARE_NUM; ++i) {
in_x_t_slice = lhs->Slice(i, i + 1);
in_y_t_slice = rhs->Slice(i, i + 1);
out_t_slice = out->Slice(i, i + 1);
auto x_dims = in_x_t_slice.dims();
auto y_dims = in_y_t_slice.dims();
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
"Axis should be in range [0, x_dims)");
int pre=0, n=0, post=0;
GetMidDims get_mid_dims;
get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post);
auto x_ = in_x_t_slice.data<int64_t>();
auto y_ = in_y_t_slice.data<int64_t>();
auto out_ = out_t_slice.data<int64_t>();
auto nx_ = in_x_t_slice.numel();
paddle::platform::Transform<CUDADeviceContext> trans;
auto gpu_device_ctx = dynamic_cast<const CUDADeviceContext*>(
ContextHolder::device_ctx());
if (post == 1) {
trans(*gpu_device_ctx, x_, x_ + nx_,
paddle::operators::RowwiseTransformIterator<int64_t, CUDADeviceContext>(y_, n),
out_, paddle::operators::math::AddFunctor<int64_t>());
} else {
trans(*gpu_device_ctx, x_, x_ + nx_,
paddle::operators::MidWiseTransformIterator<int64_t, CUDADeviceContext>(y_, n, post),
out_, paddle::operators::math::AddFunctor<int64_t>());
}
}
}
}
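// Grid-stride reduction kernel for the broadcasted add gradient: each thread
// owns one (share i, position k) entry of dy and sums dout over the pre/post
// dimensions that y was broadcast across.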
template <typename T>
__global__ void add_dy(T* dy, const T* dout, size_t pre, size_t post, size_t n) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < SHARE_NUM * n) {
int i = col / n;
int k = col % n;
T dy_ = 0;
for (int j = 0; j < pre; ++j) {
for (int m = 0; m < post; ++m) {
int out_offset = i * pre * n * post + j * n * post + k * post + m;
dy_ += dout[out_offset];
}
}
dy[col] = dy_;
col += blockDim.x * gridDim.x;
}
}
template <typename T>
__global__ void copy_dx(T* dx, const T* dout, size_t size) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
dx[col] = dout[col];
col += blockDim.x * gridDim.x;
}
}
template <typename T>
__global__ void set_zero(T* data, size_t size) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
data[col] = 0;
col += blockDim.x * gridDim.x;
}
}
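// Gradient of the (possibly broadcasted) add: dx is a plain copy of dout;
// dy is also a copy when x and y have the same rank, otherwise dout is
// reduced over the pre/post broadcast dimensions with add_dy.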
void add_grad(const Tensor *in_x_t,
const Tensor *in_y_t,
const Tensor *dout,
Tensor *dx, Tensor *dy,
int axis) {
auto ctx = ContextHolder::exec_ctx();
auto dout_data = dout->data<int64_t>();
if (dx) {
auto dx_data = dx->mutable_data<int64_t>(ctx->GetPlace());
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size = dim3((dout->numel() + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
copy_dx<int64_t><<<grid_size, block_size, 0, stream>>>(dx_data, dout_data, dout->numel());
}
if (dy) {
auto dy_data = dy->mutable_data<int64_t>(ctx->GetPlace());
if (in_x_t->dims().size() == in_y_t->dims().size()) {
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size =
dim3((dout->numel() + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
copy_dx<int64_t><<<grid_size, block_size, 0, stream>>>(dy_data, dout_data, dout->numel());
} else {
auto x_dims = in_x_t->dims();
auto y_dims = in_y_t->dims();
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
"Axis should be in range [0, x_dims)");
int pre=0, n=0, post=0;
GetMidDims get_mid_dims;
get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post);
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size =
dim3((SHARE_NUM * n + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
add_dy<int64_t><<<grid_size, block_size, 0, stream>>>(
dy_data, dout_data, pre, post, n);
}
}
}
void sub(const Tensor *lhs, const Tensor *rhs, Tensor *out) {
auto lhs_tuple = from_tensor(lhs);
auto rhs_tuple = from_tensor(rhs);
auto out_tuple = from_tensor(out);
auto lhs_ = std::get<0>(lhs_tuple).get();
auto rhs_ = std::get<0>(rhs_tuple).get();
auto out_ = std::get<0>(out_tuple).get();
lhs_->sub(rhs_, out_);
}
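// Broadcasts in_y_t to the shape given by expand_dims (the shape of x):
// y_expand_t is zero-filled, then for every share slice y is added row-wise
// or mid-wise along the broadcast axis, which effectively tiles y.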
template <typename DeviceContext, typename T>
void Expand(const framework::Tensor* in_y_t,
int axis, Tensor* y_expand_t,
const framework::DDim &expand_dims, const framework::ExecutionContext *ctx) {
T* y_expand_data = y_expand_t->mutable_data<T>(expand_dims, ctx->GetPlace());
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size =
dim3((y_expand_t->numel() + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
set_zero<int64_t><<<grid_size, block_size, 0, stream>>>(y_expand_data, y_expand_t->numel());
Tensor in_y_t_slice;
Tensor y_expand_t_slice;
for (size_t i = 0; i < SHARE_NUM; ++i) {
y_expand_t_slice = y_expand_t->Slice(i, i + 1);
in_y_t_slice = in_y_t->Slice(i, i + 1);
auto y_expand_dims = y_expand_t_slice.dims();
auto y_dims = in_y_t_slice.dims();
axis = (axis == -1 ? y_expand_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < y_expand_dims.size(),
"Axis should be in range [0, x_dims)");
int pre, n, post;
GetMidDims get_mid_dims;
get_mid_dims(y_expand_dims, y_dims, axis, &pre, &n, &post);
auto y_expand_ = y_expand_t_slice.data<T>();
auto y_ = in_y_t_slice.data<T>();
auto nx_ = y_expand_t_slice.numel();
paddle::platform::Transform<DeviceContext> trans;
if (post == 1) {
trans(ctx->template device_context<DeviceContext>(), y_expand_, y_expand_ + nx_,
paddle::operators::RowwiseTransformIterator<T, DeviceContext>(y_, n),
y_expand_, paddle::operators::math::AddFunctor<T>());
} else {
trans(ctx->template device_context<DeviceContext>(), y_expand_, y_expand_ + nx_,
paddle::operators::MidWiseTransformIterator<T, DeviceContext>(y_, n, post),
y_expand_, paddle::operators::math::AddFunctor<T>());
}
}
}
void elementwise_mul(const Tensor *in_x_t, const Tensor *in_y_t, Tensor *out_t, int axis) {
auto ctx = ContextHolder::exec_ctx();
if (in_x_t->dims() == in_y_t->dims()) {
elementwise_mul_with_same_dim(in_x_t, in_y_t, out_t);
} else {
Tensor y_expand_t;
//expand input in_y_t into y_expand_t (dims: in_x_t->dims)
Expand<CUDADeviceContext, int64_t>(in_y_t, axis, &y_expand_t, in_x_t->dims(), ctx);
elementwise_mul_with_same_dim(in_x_t, &y_expand_t, out_t);
}
}
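// Gradient of the (possibly broadcasted) elementwise multiply:
// dx = dout * broadcast(y); dy = dout * x reduced over the pre/post
// broadcast dimensions (via add_dy).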
void elementwise_mul_grad(const Tensor *in_x_t,
const Tensor *in_y_t,
const Tensor *dout,
Tensor *dx, Tensor *dy, int axis) {
auto ctx = ContextHolder::exec_ctx();
if (dx) {
// dx = dout * y_expand
auto dx_data = dx->mutable_data<int64_t>(ctx->GetPlace());
Tensor y_expand_t;
// expand in_y_t into y_expand_t (in_x_t->dims)
Expand<CUDADeviceContext, int64_t>(in_y_t, axis, &y_expand_t, in_x_t->dims(), ctx);
elementwise_mul_with_same_dim(dout, &y_expand_t, dx);
}
if (dy) {
// dy_expand = dout * x
// dy = reduce(dy_expand)
auto dy_data = dy->mutable_data<int64_t>(ctx->GetPlace());
Tensor dy_expand_t;
int64_t* dy_expand_t_data =
dy_expand_t.mutable_data<int64_t>(in_x_t->dims(), ctx->GetPlace());
elementwise_mul_with_same_dim(dout, in_x_t, &dy_expand_t);
// reduce: dy_expand_t (dims: in_x_t->dims()) -> dy (dims: in_y_t->dims())
auto x_dims = in_x_t->dims();
auto y_dims = in_y_t->dims();
axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
"Axis should be in range [0, x_dims)");
int pre = 0, n = 0, post = 0;
GetMidDims get_mid_dims;
get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post);
dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1);
dim3 grid_size =
dim3((SHARE_NUM * n + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1);
auto stream = ctx->template device_context<paddle::platform::CUDADeviceContext>().stream();
add_dy<int64_t><<<grid_size, block_size, 0, stream>>>( dy_data, dy_expand_t_data, pre, post, n);
}
}
void elementwise_mul_with_same_dim(const Tensor *lhs, const Tensor *rhs, Tensor *out) {
auto lhs_tuple = from_tensor(lhs);
auto rhs_tuple = from_tensor(rhs);
auto out_tuple = from_tensor(out);
auto lhs_ = std::get<0>(lhs_tuple).get();
auto rhs_ = std::get<0>(rhs_tuple).get();
auto out_ = std::get<0>(out_tuple).get();
lhs_->mul(rhs_, out_);
}
} // aby3
} // operators
} // paddle
|
5ee22d3541029b8cf2790427fba6e0442af91fd5.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide HERK interface
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/blas3.h"
#include "cutlass/gemm/device/rank_k.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/rank_k_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "testbed_rank_k_universal.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
// HERK operator on HIPBLAS_OP_N (column-major) input layouts
TEST(SM80_Device_Herk_cf32n_cf32n_l_tensor_op_f32, 64x64x16_32x32x16) {
using ElementA = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementC = cutlass::complex<float>;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementAccumulator = cutlass::complex<float>;
using RankK = cutlass::gemm::device::RankK<
ElementA,
LayoutA,
ElementC,
LayoutC,
cutlass::FillMode::kLower,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementC,
1,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
4, // kStages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAddComplex,
cutlass::ComplexTransform::kNone,
cutlass::BlasMode::kHermitian
>;
EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// HERK operator on HIPBLAS_OP_N (column-major) input layouts
TEST(SM80_Device_Herk_cf32n_cf32n_u_tensor_op_f32, 64x64x16_32x32x16) {
using ElementA = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementC = cutlass::complex<float>;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementAccumulator = cutlass::complex<float>;
using RankK = cutlass::gemm::device::RankK<
ElementA,
LayoutA,
ElementC,
LayoutC,
cutlass::FillMode::kUpper,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementC,
1,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
4, // kStages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAddComplex,
cutlass::ComplexTransform::kNone,
cutlass::BlasMode::kHermitian
>;
EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// HERK operator on HIPBLAS_OP_C (row-major + conj) input layouts
TEST(SM80_Device_Herk_cf32h_cf32n_l_tensor_op_f32, 64x64x16_32x32x16) {
using ElementA = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajor;
using ElementC = cutlass::complex<float>;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementAccumulator = cutlass::complex<float>;
using RankK = cutlass::gemm::device::RankK<
ElementA,
LayoutA,
ElementC,
LayoutC,
cutlass::FillMode::kLower,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementC,
1,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
3, // kStages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAddComplex,
cutlass::ComplexTransform::kConjugate,
cutlass::BlasMode::kHermitian
>;
EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// HERK operator on HIPBLAS_OP_C (row-major + conj) input layouts
TEST(SM80_Device_Herk_cf32h_cf32n_u_tensor_op_f32, 64x64x16_32x32x16) {
using ElementA = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajor;
using ElementC = cutlass::complex<float>;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementAccumulator = cutlass::complex<float>;
using RankK = cutlass::gemm::device::RankK<
ElementA,
LayoutA,
ElementC,
LayoutC,
cutlass::FillMode::kUpper,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementC,
1,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
3, // kStages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAddComplex,
cutlass::ComplexTransform::kConjugate,
cutlass::BlasMode::kHermitian
>;
EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
| 5ee22d3541029b8cf2790427fba6e0442af91fd5.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for device-wide HERK interface
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/blas3.h"
#include "cutlass/gemm/device/rank_k.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/rank_k_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "testbed_rank_k_universal.h"
#if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
/////////////////////////////////////////////////////////////////////////////////////////////////
// HERK operator on CUBLAS_OP_N (column-major) input layouts
TEST(SM80_Device_Herk_cf32n_cf32n_l_tensor_op_f32, 64x64x16_32x32x16) {
using ElementA = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementC = cutlass::complex<float>;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementAccumulator = cutlass::complex<float>;
using RankK = cutlass::gemm::device::RankK<
ElementA,
LayoutA,
ElementC,
LayoutC,
cutlass::FillMode::kLower,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementC,
1,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
4, // kStages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAddComplex,
cutlass::ComplexTransform::kNone,
cutlass::BlasMode::kHermitian
>;
EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// HERK operator on CUBLAS_OP_N (column-major) input layouts
TEST(SM80_Device_Herk_cf32n_cf32n_u_tensor_op_f32, 64x64x16_32x32x16) {
using ElementA = cutlass::complex<float>;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementC = cutlass::complex<float>;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementAccumulator = cutlass::complex<float>;
using RankK = cutlass::gemm::device::RankK<
ElementA,
LayoutA,
ElementC,
LayoutC,
cutlass::FillMode::kUpper,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementC,
1,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
4, // kStages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAddComplex,
cutlass::ComplexTransform::kNone,
cutlass::BlasMode::kHermitian
>;
EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// HERK operator on CUBLAS_OP_C (row-major + conj) input layouts
TEST(SM80_Device_Herk_cf32h_cf32n_l_tensor_op_f32, 64x64x16_32x32x16) {
using ElementA = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajor;
using ElementC = cutlass::complex<float>;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementAccumulator = cutlass::complex<float>;
using RankK = cutlass::gemm::device::RankK<
ElementA,
LayoutA,
ElementC,
LayoutC,
cutlass::FillMode::kLower,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementC,
1,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
3, // kStages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAddComplex,
cutlass::ComplexTransform::kConjugate,
cutlass::BlasMode::kHermitian
>;
EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
// HERK operator on CUBLAS_OP_C (row-major + conj) input layouts
TEST(SM80_Device_Herk_cf32h_cf32n_u_tensor_op_f32, 64x64x16_32x32x16) {
using ElementA = cutlass::complex<float>;
using LayoutA = cutlass::layout::RowMajor;
using ElementC = cutlass::complex<float>;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementAccumulator = cutlass::complex<float>;
using RankK = cutlass::gemm::device::RankK<
ElementA,
LayoutA,
ElementC,
LayoutC,
cutlass::FillMode::kUpper,
ElementAccumulator,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<16, 8, 8>,
cutlass::epilogue::thread::LinearCombination<
ElementC,
1,
ElementAccumulator,
ElementAccumulator
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
3, // kStages
1, // AlignmentA
false, // SplitKSerial
cutlass::arch::OpMultiplyAddComplex,
cutlass::ComplexTransform::kConjugate,
cutlass::BlasMode::kHermitian
>;
EXPECT_TRUE(test::gemm::device::TestAllRankKUniversal<RankK>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
|
6142aa272693280d8207d26a8c65b7f1c414df2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl_2x2.cu, normal z -> c, Tue Aug 30 09:38:32 2016
@author Ichitaro Yamazaki
*/
#include "magma_internal.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
clascl_2x2_lower(
int m,
const magmaFloatComplex* W, int ldw,
magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaFloatComplex D21 = W( 1, 0 );
magmaFloatComplex D11 = MAGMA_C_DIV( W( 1, 1 ), D21 );
magmaFloatComplex D22 = MAGMA_C_DIV( W( 0, 0 ), MAGMA_C_CONJ( D21 ) );
float T = 1.0 / ( MAGMA_C_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_C_DIV( MAGMA_C_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_C_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
clascl_2x2_upper(
int m,
const magmaFloatComplex *W, int ldw,
magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaFloatComplex D21 = W( m, 1 );
magmaFloatComplex D11 = MAGMA_C_DIV( W( m+1, 1 ), MAGMA_C_CONJ( D21 ) );
magmaFloatComplex D22 = MAGMA_C_DIV( W( m, 0 ), D21 );
float T = 1.0 / ( MAGMA_C_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_C_DIV( MAGMA_C_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_C_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/***************************************************************************//**
Purpose
-------
CLASCL_2x2 scales the M by M complex matrix A by the 2-by-2 pivot.
TYPE specifies that A may be upper or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
dW COMPLEX vector, dimension (2*lddw)
The matrix containing the 2-by-2 pivot.
@param[in]
lddw INTEGER
The leading dimension of the array W. LDDW >= max(1,M).
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix to be scaled by dW. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_2x2
*******************************************************************************/
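/*
    Illustrative call (a sketch, not taken from the MAGMA sources; it assumes
    dW, dA and queue have already been allocated/created by the caller):

        magma_int_t info;
        magmablas_clascl_2x2_q( MagmaLower, m, dW, lddw, dA, ldda, queue, &info );
        // info == 0 on success; info == -i if the i-th argument was illegal.
*/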
extern "C" void
magmablas_clascl_2x2_q(
magma_type_t type, magma_int_t m,
magmaFloatComplex_const_ptr dW, magma_int_t lddw,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if (type == MagmaLower) {
hipLaunchKernelGGL(( clascl_2x2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda);
}
else {
hipLaunchKernelGGL(( clascl_2x2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda);
}
}
| 6142aa272693280d8207d26a8c65b7f1c414df2b.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlascl_2x2.cu, normal z -> c, Tue Aug 30 09:38:32 2016
@author Ichitaro Yamazaki
*/
#include "magma_internal.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
clascl_2x2_lower(
int m,
const magmaFloatComplex* W, int ldw,
magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaFloatComplex D21 = W( 1, 0 );
magmaFloatComplex D11 = MAGMA_C_DIV( W( 1, 1 ), D21 );
magmaFloatComplex D22 = MAGMA_C_DIV( W( 0, 0 ), MAGMA_C_CONJ( D21 ) );
float T = 1.0 / ( MAGMA_C_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_C_DIV( MAGMA_C_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_C_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
clascl_2x2_upper(
int m,
const magmaFloatComplex *W, int ldw,
magmaFloatComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaFloatComplex D21 = W( m, 1 );
magmaFloatComplex D11 = MAGMA_C_DIV( W( m+1, 1 ), MAGMA_C_CONJ( D21 ) );
magmaFloatComplex D22 = MAGMA_C_DIV( W( m, 0 ), D21 );
float T = 1.0 / ( MAGMA_C_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_C_DIV( MAGMA_C_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_C_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/***************************************************************************//**
Purpose
-------
CLASCL_2x2 scales the M by M complex matrix A by the 2-by-2 pivot.
TYPE specifies that A may be upper or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
dW COMPLEX vector, dimension (2*lddw)
The matrix containing the 2-by-2 pivot.
@param[in]
lddw INTEGER
The leading dimension of the array W. LDDW >= max(1,M).
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix to be scaled by dW. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_2x2
*******************************************************************************/
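/*
    Illustrative call (a sketch, not taken from the MAGMA sources; it assumes
    dW, dA and queue have already been allocated/created by the caller):

        magma_int_t info;
        magmablas_clascl_2x2_q( MagmaLower, m, dW, lddw, dA, ldda, queue, &info );
        // info == 0 on success; info == -i if the i-th argument was illegal.
*/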
extern "C" void
magmablas_clascl_2x2_q(
magma_type_t type, magma_int_t m,
magmaFloatComplex_const_ptr dW, magma_int_t lddw,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if (type == MagmaLower) {
clascl_2x2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda);
}
else {
clascl_2x2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda);
}
}
|
e5882278ed0df409d7a125fdfc9524de042df4d4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
int main()
{
int device_no;
//get device number
hipGetDeviceCount(&device_no);
//for each device find the props
int i, driverVersion, runtimeVersion;
for(i = 0; i < device_no; i++)
{
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, i);
printf("Name of device %d: %s\n", i, properties.name);
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
printf("\tCUDA driver version: %d.%d\n", driverVersion/1000, (driverVersion%100)/10);
printf("\tCUDA runtime Version: %d.%d\n", runtimeVersion/1000, (runtimeVersion%100)/10);
printf("\tCUDA capability version number: %d.%d\n", properties.major, properties.minor);
printf("\tMemory clock rate (KHz): %.0f Mhz\n", properties.memoryClockRate * 1e-3f);
printf("\tMemory bus width (bits): %d\n", properties.memoryBusWidth);
printf("\tPeak memory bandwidth: (GB/s): %f\n", 2.0*properties.memoryClockRate*(properties.memoryBusWidth/8)/1.0e6);
printf("\tTotal constant memory (bytes): %lu\n", properties.totalGlobalMem);
printf("\tTotal global memory: %.0f MBytes (%llu bytes)\n", (float)properties.totalGlobalMem/1048576.0f, (unsigned long long) properties.totalGlobalMem);
printf("\tMaximum shared memory available on a thread block (bytes): %lu\n", properties.sharedMemPerBlock);
printf("\tMaximum number of 32-bit registers on a thread block: %d\n", properties.regsPerBlock);
printf("\tWarp size: %d\n", properties.warpSize);
printf("\tMaximum number of threads per block: %d\n", properties.maxThreadsPerBlock);
printf("\tMaximum size of each dimension of a block: %d, %d, %d\n", properties.maxThreadsDim[0], properties.maxThreadsDim[1], properties.maxThreadsDim[2]);
printf("\tMaximum size of each dimension of a grid: %d, %d, %d\n", properties.maxGridSize[0], properties.maxGridSize[1], properties.maxGridSize[2]);
printf("\tClock Rate (KHz): %d\n\n", properties.clockRate);
}
}
| e5882278ed0df409d7a125fdfc9524de042df4d4.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
int main()
{
int device_no;
//get device number
cudaGetDeviceCount(&device_no);
//for each device find the props
int i, driverVersion, runtimeVersion;
for(i = 0; i < device_no; i++)
{
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, i);
printf("Name of device %d: %s\n", i, properties.name);
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
printf("\tCUDA driver version: %d.%d\n", driverVersion/1000, (driverVersion%100)/10);
printf("\tCUDA runtime Version: %d.%d\n", runtimeVersion/1000, (runtimeVersion%100)/10);
printf("\tCUDA capability version number: %d.%d\n", properties.major, properties.minor);
printf("\tMemory clock rate (KHz): %.0f Mhz\n", properties.memoryClockRate * 1e-3f);
printf("\tMemory bus width (bits): %d\n", properties.memoryBusWidth);
printf("\tPeak memory bandwidth: (GB/s): %f\n", 2.0*properties.memoryClockRate*(properties.memoryBusWidth/8)/1.0e6);
printf("\tTotal constant memory (bytes): %lu\n", properties.totalGlobalMem);
printf("\tTotal global memory: %.0f MBytes (%llu bytes)\n", (float)properties.totalGlobalMem/1048576.0f, (unsigned long long) properties.totalGlobalMem);
printf("\tMaximum shared memory available on a thread block (bytes): %lu\n", properties.sharedMemPerBlock);
printf("\tMaximum number of 32-bit registers on a thread block: %d\n", properties.regsPerBlock);
printf("\tWarp size: %d\n", properties.warpSize);
printf("\tMaximum number of threads per block: %d\n", properties.maxThreadsPerBlock);
printf("\tMaximum size of each dimension of a block: %d, %d, %d\n", properties.maxThreadsDim[0], properties.maxThreadsDim[1], properties.maxThreadsDim[2]);
printf("\tMaximum size of each dimension of a grid: %d, %d, %d\n", properties.maxGridSize[0], properties.maxGridSize[1], properties.maxGridSize[2]);
printf("\tClock Rate (KHz): %d\n\n", properties.clockRate);
}
}
|
fac64b7e6b50d4156f1077a994a0681eebd339ef.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "norm2_strided_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int xOffset = 1;
float *dx = NULL;
hipMalloc(&dx, XSIZE*YSIZE);
int incx = 1;
float result = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((norm2_strided_float), dim3(gridBlock), dim3(threadBlock), 0, 0, n, xOffset, dx, incx, result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((norm2_strided_float), dim3(gridBlock), dim3(threadBlock), 0, 0, n, xOffset, dx, incx, result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((norm2_strided_float), dim3(gridBlock), dim3(threadBlock), 0, 0, n, xOffset, dx, incx, result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fac64b7e6b50d4156f1077a994a0681eebd339ef.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "norm2_strided_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int xOffset = 1;
float *dx = NULL;
cudaMalloc(&dx, XSIZE*YSIZE);
int incx = 1;
float result = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
norm2_strided_float<<<gridBlock,threadBlock>>>(n,xOffset,dx,incx,result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
norm2_strided_float<<<gridBlock,threadBlock>>>(n,xOffset,dx,incx,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
norm2_strided_float<<<gridBlock,threadBlock>>>(n,xOffset,dx,incx,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ee5282b1f714ed8ac853f692c2bddd0ff0faf402.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011-2016 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CUDA split kernel entry points */
#ifdef __CUDA_ARCH__
#define __SPLIT_KERNEL__
#include "kernel/kernel_compat_cuda.h"
#include "kernel_config.h"
#include "kernel/split/kernel_split_common.h"
#include "kernel/split/kernel_data_init.h"
#include "kernel/split/kernel_path_init.h"
#include "kernel/split/kernel_scene_intersect.h"
#include "kernel/split/kernel_lamp_emission.h"
#include "kernel/split/kernel_do_volume.h"
#include "kernel/split/kernel_queue_enqueue.h"
#include "kernel/split/kernel_indirect_background.h"
#include "kernel/split/kernel_shader_setup.h"
#include "kernel/split/kernel_shader_sort.h"
#include "kernel/split/kernel_shader_eval.h"
#include "kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h"
#include "kernel/split/kernel_subsurface_scatter.h"
#include "kernel/split/kernel_direct_lighting.h"
#include "kernel/split/kernel_shadow_blocked_ao.h"
#include "kernel/split/kernel_shadow_blocked_dl.h"
#include "kernel/split/kernel_enqueue_inactive.h"
#include "kernel/split/kernel_next_iteration_setup.h"
#include "kernel/split/kernel_indirect_subsurface.h"
#include "kernel/split/kernel_buffer_update.h"
#include "kernel/kernel_film.h"
/* kernels */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_state_buffer_size(uint num_threads, uint64_t *size)
{
*size = split_data_buffer_size(NULL, num_threads);
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_path_trace_data_init(
ccl_global void *split_data_buffer,
int num_elements,
ccl_global char *ray_state,
int start_sample,
int end_sample,
int sx, int sy, int sw, int sh, int offset, int stride,
ccl_global int *Queue_index,
int queuesize,
ccl_global char *use_queues_flag,
ccl_global unsigned int *work_pool_wgs,
unsigned int num_samples,
ccl_global float *buffer)
{
kernel_data_init(NULL,
NULL,
split_data_buffer,
num_elements,
ray_state,
start_sample,
end_sample,
sx, sy, sw, sh, offset, stride,
Queue_index,
queuesize,
use_queues_flag,
work_pool_wgs,
num_samples,
buffer);
}
#define DEFINE_SPLIT_KERNEL_FUNCTION(name) \
extern "C" __global__ void \
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_SPLIT_MAX_REGISTERS) \
kernel_cuda_##name() \
{ \
kernel_##name(NULL); \
}
#define DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(name, type) \
extern "C" __global__ void \
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_SPLIT_MAX_REGISTERS) \
kernel_cuda_##name() \
{ \
ccl_local type locals; \
kernel_##name(NULL, &locals); \
}
DEFINE_SPLIT_KERNEL_FUNCTION(path_init)
DEFINE_SPLIT_KERNEL_FUNCTION(scene_intersect)
DEFINE_SPLIT_KERNEL_FUNCTION(lamp_emission)
DEFINE_SPLIT_KERNEL_FUNCTION(do_volume)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(queue_enqueue, QueueEnqueueLocals)
DEFINE_SPLIT_KERNEL_FUNCTION(indirect_background)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(shader_setup, uint)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(shader_sort, ShaderSortLocals)
DEFINE_SPLIT_KERNEL_FUNCTION(shader_eval)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(holdout_emission_blurring_pathtermination_ao, BackgroundAOLocals)
DEFINE_SPLIT_KERNEL_FUNCTION(subsurface_scatter)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(direct_lighting, uint)
DEFINE_SPLIT_KERNEL_FUNCTION(shadow_blocked_ao)
DEFINE_SPLIT_KERNEL_FUNCTION(shadow_blocked_dl)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(enqueue_inactive, uint)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(next_iteration_setup, uint)
DEFINE_SPLIT_KERNEL_FUNCTION(indirect_subsurface)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(buffer_update, uint)
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh)
kernel_film_convert_to_byte(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_half_float(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh)
kernel_film_convert_to_half_float(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
}
#endif
| ee5282b1f714ed8ac853f692c2bddd0ff0faf402.cu | /*
* Copyright 2011-2016 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CUDA split kernel entry points */
#ifdef __CUDA_ARCH__
#define __SPLIT_KERNEL__
#include "kernel/kernel_compat_cuda.h"
#include "kernel_config.h"
#include "kernel/split/kernel_split_common.h"
#include "kernel/split/kernel_data_init.h"
#include "kernel/split/kernel_path_init.h"
#include "kernel/split/kernel_scene_intersect.h"
#include "kernel/split/kernel_lamp_emission.h"
#include "kernel/split/kernel_do_volume.h"
#include "kernel/split/kernel_queue_enqueue.h"
#include "kernel/split/kernel_indirect_background.h"
#include "kernel/split/kernel_shader_setup.h"
#include "kernel/split/kernel_shader_sort.h"
#include "kernel/split/kernel_shader_eval.h"
#include "kernel/split/kernel_holdout_emission_blurring_pathtermination_ao.h"
#include "kernel/split/kernel_subsurface_scatter.h"
#include "kernel/split/kernel_direct_lighting.h"
#include "kernel/split/kernel_shadow_blocked_ao.h"
#include "kernel/split/kernel_shadow_blocked_dl.h"
#include "kernel/split/kernel_enqueue_inactive.h"
#include "kernel/split/kernel_next_iteration_setup.h"
#include "kernel/split/kernel_indirect_subsurface.h"
#include "kernel/split/kernel_buffer_update.h"
#include "kernel/kernel_film.h"
/* kernels */
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_state_buffer_size(uint num_threads, uint64_t *size)
{
*size = split_data_buffer_size(NULL, num_threads);
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_path_trace_data_init(
ccl_global void *split_data_buffer,
int num_elements,
ccl_global char *ray_state,
int start_sample,
int end_sample,
int sx, int sy, int sw, int sh, int offset, int stride,
ccl_global int *Queue_index,
int queuesize,
ccl_global char *use_queues_flag,
ccl_global unsigned int *work_pool_wgs,
unsigned int num_samples,
ccl_global float *buffer)
{
kernel_data_init(NULL,
NULL,
split_data_buffer,
num_elements,
ray_state,
start_sample,
end_sample,
sx, sy, sw, sh, offset, stride,
Queue_index,
queuesize,
use_queues_flag,
work_pool_wgs,
num_samples,
buffer);
}
#define DEFINE_SPLIT_KERNEL_FUNCTION(name) \
extern "C" __global__ void \
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_SPLIT_MAX_REGISTERS) \
kernel_cuda_##name() \
{ \
kernel_##name(NULL); \
}
#define DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(name, type) \
extern "C" __global__ void \
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_SPLIT_MAX_REGISTERS) \
kernel_cuda_##name() \
{ \
ccl_local type locals; \
kernel_##name(NULL, &locals); \
}
DEFINE_SPLIT_KERNEL_FUNCTION(path_init)
DEFINE_SPLIT_KERNEL_FUNCTION(scene_intersect)
DEFINE_SPLIT_KERNEL_FUNCTION(lamp_emission)
DEFINE_SPLIT_KERNEL_FUNCTION(do_volume)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(queue_enqueue, QueueEnqueueLocals)
DEFINE_SPLIT_KERNEL_FUNCTION(indirect_background)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(shader_setup, uint)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(shader_sort, ShaderSortLocals)
DEFINE_SPLIT_KERNEL_FUNCTION(shader_eval)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(holdout_emission_blurring_pathtermination_ao, BackgroundAOLocals)
DEFINE_SPLIT_KERNEL_FUNCTION(subsurface_scatter)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(direct_lighting, uint)
DEFINE_SPLIT_KERNEL_FUNCTION(shadow_blocked_ao)
DEFINE_SPLIT_KERNEL_FUNCTION(shadow_blocked_dl)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(enqueue_inactive, uint)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(next_iteration_setup, uint)
DEFINE_SPLIT_KERNEL_FUNCTION(indirect_subsurface)
DEFINE_SPLIT_KERNEL_FUNCTION_LOCALS(buffer_update, uint)
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh)
kernel_film_convert_to_byte(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
}
extern "C" __global__ void
CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
kernel_cuda_convert_to_half_float(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
{
int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
if(x < sx + sw && y < sy + sh)
kernel_film_convert_to_half_float(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
}
#endif
|
54636b8120764399cad3a6505725ecc01a3ff5e7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This code is released into the public domain.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include"map"
#include"readubyte.h"
#include"LeNet.cuh"
using namespace std;
// Application parameters
const int GPUID = 0;//The GPU ID to use
const int TrainingIteration = 5;//Number of training iterations
const int RandomSeed = 0;//Override random seed (default uses random_device)
const int TestImageSize = -1;//Number of test images used to compute the error rate (default uses entire test set)
// Batch parameters
const int batch_size = 32;//Batch size for training
// Filenames
#define _MNIST
//#define _CIFAR10
#ifdef _MNIST
const string strTrainDataPath = "../Dataset/timg.bin";//Training images filename
const string strTrainLabelPath = "../Dataset/tlabel.bin";//Training labels filename
const string strTestDataPath = "../Dataset/test_img.bin";//Test images filename
const string strTestLabelPath = "../Dataset/test_label.bin";//Test labels filename
#endif
#ifdef _CIFAR10
const vector<string> TrainDataPath = { "data_batch_1.bin", "data_batch_2.bin", "data_batch_3.bin", "data_batch_4.bin", "data_batch_5.bin" };//Training images filename
const vector<string> TestDataPath = { "test_batch.bin" };//Test images filename
#endif
// Solver parameters
const double LearningRate = 0.01;//Base learning rate
const double Gamma = 0.0001;//Learning rate policy gamma
const double Power = 0.75;//Learning rate policy power
_SAN_PAIR_DEF(RESULT_PAIR, float, Error, 0.0, float, Base, 0.0, , );
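// Evaluate: forward-propagates a dataset through the network in batches and
// counts misclassifications. For each image the class with the maximal
// response of the final fully connected layer is compared against the label;
// the returned pair holds the error count and the number of images evaluated.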
RESULT_PAIR Evaluate(TrainingContext &context, cDataLayer &DataLayer, const vector<float> &DataSet, const vector<float> &LabelSet, const size_t DataSetSize, const cDimension &DataSetShape, cFullyConnectedLayer &FC)
{
RESULT_PAIR Result;
const int BatchSize = DataSetShape.batches;
const int BatchNumber = DataSetSize / DataSetShape.batches;
const int PerBatchSize = DataSetShape.size;
for (int seek_batch = 0; seek_batch < BatchNumber; seek_batch++)
{
int Offset = seek_batch * PerBatchSize;
// Prepare current batch on device
DataLayer.iOutput().iWrite(&DataSet[Offset], PerBatchSize, 0);
// Forward propagate test image
context.Forward(DataLayer);
FC.iOutput().iSynchronize();
for (int seek = 0; seek < BatchSize; seek = seek + 1)
{
// Determine classification according to maximal response
auto pOutput = FC.iOutput().iGetPtr(HOST_MEM) + seek * 10;
vector<float> Vec(10, 0.0);
for (int ID = 0; ID < 10; ID = ID + 1) { Vec[ID] = pOutput[ID]; }
int MaxID = 0;
for (int ID = 1; ID < 10; ++ID)
{
MaxID = pOutput[MaxID] < pOutput[ID] ? ID : MaxID;
}
Result.Error = Result.Error + (MaxID == LabelSet[Result.Base + seek] ? 0 : 1);
}
Result.Base = Result.Base + BatchSize;
}
return Result;
}
int main(int argc, char **argv)
{
cDimension Block(1, 1, 1);
// Open input data
std::printf("Reading input data\n");
// Read dataset sizes
#ifdef _MNIST
size_t train_size = MNISTDataSetLoader(strTrainDataPath, strTrainLabelPath, vector<float>(), vector<float>(), Block);
size_t test_size = MNISTDataSetLoader(strTestDataPath, strTestLabelPath, vector<float>(), vector<float>(), Block);
#endif
#ifdef _CIFAR10
size_t train_size = CIFAR10DataSetLoader(TrainDataPath, vector<float>(), vector<float>(), Block);
size_t test_size = CIFAR10DataSetLoader(TestDataPath, vector<float>(), vector<float>(), Block);
#endif
if (train_size == 0) { return 1; }
Block.iUpdate();
vector<float> TrainingImageSet(train_size * Block.size);
vector<float> TrainingLabelSet(train_size);
vector<float> TestImageSet(test_size * Block.size);
vector<float> TestLabelSet(test_size);
// Read data from datasets
#ifdef _MNIST
if (MNISTDataSetLoader(strTrainDataPath, strTrainLabelPath, TrainingImageSet, TrainingLabelSet, Block) != train_size) { return 2; }
if (MNISTDataSetLoader(strTestDataPath, strTestLabelPath, TestImageSet, TestLabelSet, Block) != test_size) { return 3; }
#endif
#ifdef _CIFAR10
if (CIFAR10DataSetLoader(TrainDataPath, TrainingImageSet, TrainingLabelSet, Block) != train_size) { return 2; }
if (CIFAR10DataSetLoader(TestDataPath, TestImageSet, TestLabelSet, Block) != test_size) { return 3; }
#endif
std::printf("Done. Training dataset size: %d, Test dataset size: %d\n", (int) train_size, (int) test_size);
std::printf("Batch size: %lld, TrainingIteration: %d\n", batch_size, TrainingIteration);
// Choose GPU
int num_GPUIDs;
checkCudaErrors(hipGetDeviceCount(&num_GPUIDs), LOCATION_STRING);
if (GPUID < 0 || GPUID >= num_GPUIDs)
{
printf("ERROR: Invalid GPU ID %d (There are %d GPUs on this machine)\n", GPUID, num_GPUIDs);
return 4;
}
Block.batches = batch_size;
Block.iUpdate();
// Create the LeNet network architecture
cDataLayer DataLayer(Block);
cConvLayer conv1(cDimension(5, 5, 1, 1), 20);
cMaxPoolLayer pool1(cDimension(2, 2, 1));
cConvLayer conv2(cDimension(5, 5, 1, 1), 50);
cMaxPoolLayer pool2(cDimension(2, 2, 1));
cFullyConnectedLayer fc1(500, cFullyConnectedLayer::FT_RELU);
cFullyConnectedLayer fc2(10, cFullyConnectedLayer::FT_SOFTMAX);
cLabelLayer LabelLayer(fc2);
// Initialize CUDNN/CUBLAS training context
TrainingContext context(GPUID);
context.iPushLayer(&DataLayer);
context.iPushLayer(&conv1);
context.iPushLayer(&pool1);
context.iPushLayer(&conv2);
context.iPushLayer(&pool2);
context.iPushLayer(&fc1);
context.iPushLayer(&fc2);
context.iPushLayer(&LabelLayer);
// Create random network
random_device RandomDevice;
default_random_engine Engine(RandomSeed < 0 ? RandomDevice() : static_cast<unsigned int>(RandomSeed));
context.iInitNetwork(Engine);
// Forward propagation data
cMemObj<float> LabelSet;
LabelSet.iResize(cDimension(1, 1, 1, Block.batches));
std::printf("Training...\n");
// Use SGD to train the network
checkCudaErrors(hipDeviceSynchronize(), LOCATION_STRING);
const int PerBatchSize = Block.size;
const int BatchNumber = train_size / Block.batches;
::cout << "Epoch\tPer Epoch TC\tPer Batch TC\tTraining Error\tTest Error\r\n";
for (int seek_epoch = 0; seek_epoch < TrainingIteration; ++seek_epoch)
{
::cout << "#" << seek_epoch + 1 << "\t";
const float CurrentLearningRate = static_cast<float>(LearningRate * pow((1.0 + Gamma * seek_epoch), (-Power)));
auto StartTime = chrono::high_resolution_clock::now();
for (int seek_batch = 0; seek_batch < BatchNumber; seek_batch = seek_batch + 1)
{
// Prepare current batch on device
checkCudaErrors(hipMemcpyAsync(DataLayer.iOutput().iGetPtr(), &TrainingImageSet[seek_batch * PerBatchSize], sizeof(float) * PerBatchSize, hipMemcpyHostToDevice), LOCATION_STRING);
checkCudaErrors(hipMemcpyAsync(LabelSet.iGetPtr(), &TrainingLabelSet[seek_batch * Block.batches], sizeof(float) * Block.batches, hipMemcpyHostToDevice), LOCATION_STRING);
context.Forward(DataLayer);
context.Backward(DataLayer, LabelLayer, LabelSet, CurrentLearningRate);
}
auto Cost = chrono::duration_cast<chrono::microseconds>(chrono::high_resolution_clock::now() - StartTime).count();
RESULT_PAIR TrainRes = Evaluate(context, DataLayer, TrainingImageSet, TrainingLabelSet, train_size, Block, fc2);
RESULT_PAIR TestRes = Evaluate(context, DataLayer, TestImageSet, TestLabelSet, test_size, Block, fc2);
std::printf("%6.2f s\t%6.2f ms\t%.4f%%\t\t%.4f%%\n", Cost / 1000000.0f, (Cost / 1000.0f) / BatchNumber, (TrainRes.Error / TrainRes.Base) * 100.0f, (TestRes.Error / TestRes.Base) * 100.0f);
}
checkCudaErrors(hipDeviceSynchronize(), LOCATION_STRING);
const int TestSize = TestImageSize < 0 ? (int) test_size : TestImageSize;
// Test the resulting neural network's classification
if (TestSize > 0)
{
RESULT_PAIR Result = Evaluate(context, DataLayer, TestImageSet, TestLabelSet, TestSize, Block, fc2);
std::printf("Test result: %.2f%% error [%d/%d]\r\n", (Result.Error / Result.Base) * 100.0f, static_cast<int>(Result.Error), static_cast<int>(Result.Base));
}
// Free data structures
checkCudaErrors(hipSetDevice(GPUID), LOCATION_STRING);
::system("pause");
return 0;
}
| 54636b8120764399cad3a6505725ecc01a3ff5e7.cu | /*
* This code is released into the public domain.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include"map"
#include"readubyte.h"
#include"LeNet.cuh"
using namespace std;
// Application parameters
const int GPUID = 0;//The GPU ID to use
const int TrainingIteration = 5;//Number of training iterations (epochs) to run
const int RandomSeed = 0;//Override random seed (default uses random_device)
const int TestImageSize = -1;//Number of test images used to compute the error rate (default uses entire test set)
// Batch parameters
const int batch_size = 32;//Batch size for training
// Filenames
#define _MNIST
//#define _CIFAR10
#ifdef _MNIST
const string strTrainDataPath = "../Dataset/timg.bin";//Training images filename
const string strTrainLabelPath = "../Dataset/tlabel.bin";//Training labels filename
const string strTestDataPath = "../Dataset/test_img.bin";//Test images filename
const string strTestLabelPath = "../Dataset/test_label.bin";//Test labels filename
#endif
#ifdef _CIFAR10
const vector<string> TrainDataPath = { "data_batch_1.bin", "data_batch_2.bin", "data_batch_3.bin", "data_batch_4.bin", "data_batch_5.bin" };//Training images filename
const vector<string> TestDataPath = { "test_batch.bin" };//Test images filename
#endif
// Solver parameters
const double LearningRate = 0.01;//Base learning rate
const double Gamma = 0.0001;//Learning rate policy gamma
const double Power = 0.75;//Learning rate policy power
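// Note: the three values above implement an inverse-decay schedule (Caffe's "inv" policy,
// here applied per epoch rather than per iteration): lr(epoch) = LearningRate * (1 + Gamma * epoch)^(-Power).
// The training loop below computes CurrentLearningRate from exactly this expression.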
_SAN_PAIR_DEF(RESULT_PAIR, float, Error, 0.0, float, Base, 0.0, , );
RESULT_PAIR Evaluate(TrainingContext &context, cDataLayer &DataLayer, const vector<float> &DataSet, const vector<float> &LabelSet, const size_t DataSetSize, const cDimension &DataSetShape, cFullyConnectedLayer &FC)
{
RESULT_PAIR Result;
const int BatchSize = DataSetShape.batches;
const int BatchNumber = DataSetSize / DataSetShape.batches;
const int PerBatchSize = DataSetShape.size;
for (int seek_batch = 0; seek_batch < BatchNumber; seek_batch++)
{
int Offset = seek_batch * PerBatchSize;
// Prepare current batch on device
DataLayer.iOutput().iWrite(&DataSet[Offset], PerBatchSize, 0);
// Forward propagate test image
context.Forward(DataLayer);
FC.iOutput().iSynchronize();
for (int seek = 0; seek < BatchSize; seek = seek + 1)
{
// Determine classification according to maximal response
auto pOutput = FC.iOutput().iGetPtr(HOST_MEM) + seek * 10;
vector<float> Vec(10, 0.0);
for (int ID = 0; ID < 10; ID = ID + 1) { Vec[ID] = pOutput[ID]; }
int MaxID = 0;
for (int ID = 1; ID < 10; ++ID)
{
MaxID = pOutput[MaxID] < pOutput[ID] ? ID : MaxID;
}
Result.Error = Result.Error + (MaxID == LabelSet[Result.Base + seek] ? 0 : 1);
}
Result.Base = Result.Base + BatchSize;
}
return Result;
}
int main(int argc, char **argv)
{
cDimension Block(1, 1, 1);
// Open input data
std::printf("Reading input data\n");
// Read dataset sizes
#ifdef _MNIST
size_t train_size = MNISTDataSetLoader(strTrainDataPath, strTrainLabelPath, vector<float>(), vector<float>(), Block);
size_t test_size = MNISTDataSetLoader(strTestDataPath, strTestLabelPath, vector<float>(), vector<float>(), Block);
#endif
#ifdef _CIFAR10
size_t train_size = CIFAR10DataSetLoader(TrainDataPath, vector<float>(), vector<float>(), Block);
size_t test_size = CIFAR10DataSetLoader(TestDataPath, vector<float>(), vector<float>(), Block);
#endif
if (train_size == 0) { return 1; }
Block.iUpdate();
vector<float> TrainingImageSet(train_size * Block.size);
vector<float> TrainingLabelSet(train_size);
vector<float> TestImageSet(test_size * Block.size);
vector<float> TestLabelSet(test_size);
// Read data from datasets
#ifdef _MNIST
if (MNISTDataSetLoader(strTrainDataPath, strTrainLabelPath, TrainingImageSet, TrainingLabelSet, Block) != train_size) { return 2; }
if (MNISTDataSetLoader(strTestDataPath, strTestLabelPath, TestImageSet, TestLabelSet, Block) != test_size) { return 3; }
#endif
#ifdef _CIFAR10
if (CIFAR10DataSetLoader(TrainDataPath, TrainingImageSet, TrainingLabelSet, Block) != train_size) { return 2; }
if (CIFAR10DataSetLoader(TestDataPath, TestImageSet, TestLabelSet, Block) != test_size) { return 3; }
#endif
std::printf("Done. Training dataset size: %d, Test dataset size: %d\n", (int) train_size, (int) test_size);
std::printf("Batch size: %lld, TrainingIteration: %d\n", batch_size, TrainingIteration);
// Choose GPU
int num_GPUIDs;
checkCudaErrors(cudaGetDeviceCount(&num_GPUIDs), LOCATION_STRING);
if (GPUID < 0 || GPUID >= num_GPUIDs)
{
printf("ERROR: Invalid GPU ID %d (There are %d GPUs on this machine)\n", GPUID, num_GPUIDs);
return 4;
}
Block.batches = batch_size;
Block.iUpdate();
// Create the LeNet network architecture
cDataLayer DataLayer(Block);
cConvLayer conv1(cDimension(5, 5, 1, 1), 20);
cMaxPoolLayer pool1(cDimension(2, 2, 1));
cConvLayer conv2(cDimension(5, 5, 1, 1), 50);
cMaxPoolLayer pool2(cDimension(2, 2, 1));
cFullyConnectedLayer fc1(500, cFullyConnectedLayer::FT_RELU);
cFullyConnectedLayer fc2(10, cFullyConnectedLayer::FT_SOFTMAX);
cLabelLayer LabelLayer(fc2);
// Initialize CUDNN/CUBLAS training context
TrainingContext context(GPUID);
context.iPushLayer(&DataLayer);
context.iPushLayer(&conv1);
context.iPushLayer(&pool1);
context.iPushLayer(&conv2);
context.iPushLayer(&pool2);
context.iPushLayer(&fc1);
context.iPushLayer(&fc2);
context.iPushLayer(&LabelLayer);
// Create random network
random_device RandomDevice;
default_random_engine Engine(RandomSeed < 0 ? RandomDevice() : static_cast<unsigned int>(RandomSeed));
context.iInitNetwork(Engine);
// Forward propagation data
cMemObj<float> LabelSet;
LabelSet.iResize(cDimension(1, 1, 1, Block.batches));
std::printf("Training...\n");
// Use SGD to train the network
checkCudaErrors(cudaDeviceSynchronize(), LOCATION_STRING);
const int PerBatchSize = Block.size;
const int BatchNumber = train_size / Block.batches;
::cout << "Epoch\tPer Epoch TC\tPer Batch TC\tTraining Error\tTest Error\r\n";
for (int seek_epoch = 0; seek_epoch < TrainingIteration; ++seek_epoch)
{
::cout << "#" << seek_epoch + 1 << "\t";
const float CurrentLearningRate = static_cast<float>(LearningRate * pow((1.0 + Gamma * seek_epoch), (-Power)));
auto StartTime = chrono::high_resolution_clock::now();
for (int seek_batch = 0; seek_batch < BatchNumber; seek_batch = seek_batch + 1)
{
// Prepare current batch on device
checkCudaErrors(cudaMemcpyAsync(DataLayer.iOutput().iGetPtr(), &TrainingImageSet[seek_batch * PerBatchSize], sizeof(float) * PerBatchSize, cudaMemcpyHostToDevice), LOCATION_STRING);
checkCudaErrors(cudaMemcpyAsync(LabelSet.iGetPtr(), &TrainingLabelSet[seek_batch * Block.batches], sizeof(float) * Block.batches, cudaMemcpyHostToDevice), LOCATION_STRING);
context.Forward(DataLayer);
context.Backward(DataLayer, LabelLayer, LabelSet, CurrentLearningRate);
}
auto Cost = chrono::duration_cast<chrono::microseconds>(chrono::high_resolution_clock::now() - StartTime).count();
RESULT_PAIR TrainRes = Evaluate(context, DataLayer, TrainingImageSet, TrainingLabelSet, train_size, Block, fc2);
RESULT_PAIR TestRes = Evaluate(context, DataLayer, TestImageSet, TestLabelSet, test_size, Block, fc2);
std::printf("%6.2f s\t%6.2f ms\t%.4f%%\t\t%.4f%%\n", Cost / 1000000.0f, (Cost / 1000.0f) / BatchNumber, (TrainRes.Error / TrainRes.Base) * 100.0f, (TestRes.Error / TestRes.Base) * 100.0f);
}
checkCudaErrors(cudaDeviceSynchronize(), LOCATION_STRING);
const int TestSize = TestImageSize < 0 ? (int) test_size : TestImageSize;
// Test the resulting neural network's classification
if (TestSize > 0)
{
RESULT_PAIR Result = Evaluate(context, DataLayer, TestImageSet, TestLabelSet, TestSize, Block, fc2);
std::printf("Test result: %.2f%% error [%d/%d]\r\n", (Result.Error / Result.Base) * 100.0f, static_cast<int>(Result.Error), static_cast<int>(Result.Base));
}
// Free data structures
checkCudaErrors(cudaSetDevice(GPUID), LOCATION_STRING);
::system("pause");
return 0;
}
|