hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
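Each row below pairs a HIP source file emitted by the hipify tool (hip_content) with the CUDA source it was generated from (cuda_content). As a quick orientation, the sketch below is illustrative only (it is not a row of the dataset) and shows the kind of mechanical rewrites visible throughout the pairs: runtime calls are renamed (cudaMalloc to hipMalloc, cudaDeviceSynchronize to hipDeviceSynchronize) and triple-chevron kernel launches become hipLaunchKernelGGL calls.

```cpp
// Illustrative sketch only (not a dataset row): a minimal CUDA kernel and the
// HIP form that hipify typically emits for it, shown via comments.
#include <hip/hip_runtime.h>   // CUDA original: #include <cuda_runtime.h>

__global__ void scale(float* data, float factor, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) data[idx] *= factor;
}

int main()
{
    const int n = 1 << 20;
    float* d_data = nullptr;
    hipMalloc((void**)&d_data, n * sizeof(float));   // CUDA: cudaMalloc(...)

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA: scale<<<grid, block>>>(d_data, 2.0f, n);
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, 2.0f, n);

    hipDeviceSynchronize();                          // CUDA: cudaDeviceSynchronize()
    hipFree(d_data);                                 // CUDA: cudaFree(d_data)
    return 0;
}
```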
c124ecce5bab26043f1399e04ef7f1396f4aafc0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "JobChooser.cuh"
#include <algorithm>
MaxClauseJobChooser::MaxClauseJobChooser(
std::vector<Clause> const& formula,
size_t n_vars,
size_t n_dead_vars,
size_t threads,
size_t n_blocks)
: var_chooser(formula, n_vars)
, JobChooser()
, n_working_vars { n_vars - n_dead_vars }
, n_threads { threads }
, n_blocks { n_blocks }
, evaluated { false }
, strategy { ChoosingStrategy::DISTRIBUTE_JOBS_PER_THREAD }
{
}
MaxClauseJobChooser::MaxClauseJobChooser(
std::vector<Clause> const& formula,
size_t n_vars,
size_t n_dead_vars,
size_t threads,
size_t n_blocks,
ChoosingStrategy strategy)
: var_chooser(formula, n_vars)
, JobChooser()
, n_working_vars { n_vars - n_dead_vars }
, n_threads { threads }
, n_blocks { n_blocks }
, evaluated { false }
, strategy { strategy }
{
}
MaxClauseJobChooser::~MaxClauseJobChooser()
{
}
bool MaxClauseJobChooser::is_evaluated()
{
return evaluated;
}
void MaxClauseJobChooser::evaluate()
{
evalVarPerJobs();
chosen_vars.resize(vars_per_job);
var_chooser.evaluate();
for (size_t i = 0; i < vars_per_job; i++) {
#ifdef USE_ASSERTIONS
assert(var_chooser.has_next_var());
#endif
chosen_vars[i] = var_chooser.next_var();
}
evaluated = true;
}
void MaxClauseJobChooser::getJobs(JobsQueue& queue)
{
#ifdef USE_ASSERTIONS
assert(evaluated);
#endif
m_fixed_lits.resize(vars_per_job);
addJobs(m_fixed_lits.data(), 0, queue);
}
void MaxClauseJobChooser::addJobs(Lit *fixed_lits, size_t n_fixed_lits, JobsQueue& queue)
{
if (n_fixed_lits == vars_per_job) {
Job job = mkJob_dev(fixed_lits, n_fixed_lits);
queue.add(job);
return;
}
Lit lit = mkLit(chosen_vars[n_fixed_lits], true);
fixed_lits[n_fixed_lits] = lit;
addJobs(fixed_lits, n_fixed_lits + 1, queue);
lit = mkLit(chosen_vars[n_fixed_lits], false);
fixed_lits[n_fixed_lits] = lit;
addJobs(fixed_lits, n_fixed_lits + 1, queue);
}
size_t MaxClauseJobChooser::get_n_jobs()
{
#ifdef USE_ASSERTIONS
assert(evaluated);
#endif
return static_cast<size_t>(::pow(2, static_cast<double>(vars_per_job)));
}
void MaxClauseJobChooser::evalVarPerJobs()
{
switch(strategy) {
case ChoosingStrategy::DISTRIBUTE_JOBS_PER_THREAD:
evalVarPerJobsDistribute();
break;
case ChoosingStrategy::UNIFORM:
evalVarPerJobsUniform();
break;
default:
assert(false);
break;
}
}
void MaxClauseJobChooser::evalVarPerJobsDistribute()
{
double expected = (n_threads * n_blocks) * JOBS_PER_THREAD;
double max = ::pow(2, std::min<size_t>(std::max<size_t>((n_working_vars - MIN_FREE_VARS), 1), 62));
#ifdef USE_ASSERTIONS
assert(max > 0);
#endif
expected = ::min(expected, max);
vars_per_job = static_cast<size_t>(std::log2(expected)) + 1 ;
vars_per_job = std::min<size_t>(vars_per_job, MAX_VARS);
}
void MaxClauseJobChooser::evalVarPerJobsUniform()
{
size_t expected = UNIFORM_NUMBER_OF_VARS;
size_t max = std::max<size_t>(n_working_vars - MIN_FREE_VARS, 1);
vars_per_job = std::min<size_t>(std::min<size_t>(expected, max), MAX_VARS);
}
|
c124ecce5bab26043f1399e04ef7f1396f4aafc0.cu
|
#include "JobChooser.cuh"
#include <algorithm>
MaxClauseJobChooser::MaxClauseJobChooser(
std::vector<Clause> const& formula,
size_t n_vars,
size_t n_dead_vars,
size_t threads,
size_t n_blocks)
: var_chooser(formula, n_vars)
, JobChooser()
, n_working_vars { n_vars - n_dead_vars }
, n_threads { threads }
, n_blocks { n_blocks }
, evaluated { false }
, strategy { ChoosingStrategy::DISTRIBUTE_JOBS_PER_THREAD }
{
}
MaxClauseJobChooser::MaxClauseJobChooser(
std::vector<Clause> const& formula,
size_t n_vars,
size_t n_dead_vars,
size_t threads,
size_t n_blocks,
ChoosingStrategy strategy)
: var_chooser(formula, n_vars)
, JobChooser()
, n_working_vars { n_vars - n_dead_vars }
, n_threads { threads }
, n_blocks { n_blocks }
, evaluated { false }
, strategy { strategy }
{
}
MaxClauseJobChooser::~MaxClauseJobChooser()
{
}
bool MaxClauseJobChooser::is_evaluated()
{
return evaluated;
}
void MaxClauseJobChooser::evaluate()
{
evalVarPerJobs();
chosen_vars.resize(vars_per_job);
var_chooser.evaluate();
for (size_t i = 0; i < vars_per_job; i++) {
#ifdef USE_ASSERTIONS
assert(var_chooser.has_next_var());
#endif
chosen_vars[i] = var_chooser.next_var();
}
evaluated = true;
}
void MaxClauseJobChooser::getJobs(JobsQueue& queue)
{
#ifdef USE_ASSERTIONS
assert(evaluated);
#endif
m_fixed_lits.resize(vars_per_job);
addJobs(m_fixed_lits.data(), 0, queue);
}
void MaxClauseJobChooser::addJobs(Lit *fixed_lits, size_t n_fixed_lits, JobsQueue& queue)
{
if (n_fixed_lits == vars_per_job) {
Job job = mkJob_dev(fixed_lits, n_fixed_lits);
queue.add(job);
return;
}
Lit lit = mkLit(chosen_vars[n_fixed_lits], true);
fixed_lits[n_fixed_lits] = lit;
addJobs(fixed_lits, n_fixed_lits + 1, queue);
lit = mkLit(chosen_vars[n_fixed_lits], false);
fixed_lits[n_fixed_lits] = lit;
addJobs(fixed_lits, n_fixed_lits + 1, queue);
}
size_t MaxClauseJobChooser::get_n_jobs()
{
#ifdef USE_ASSERTIONS
assert(evaluated);
#endif
return static_cast<size_t>(std::pow(2, static_cast<double>(vars_per_job)));
}
void MaxClauseJobChooser::evalVarPerJobs()
{
switch(strategy) {
case ChoosingStrategy::DISTRIBUTE_JOBS_PER_THREAD:
evalVarPerJobsDistribute();
break;
case ChoosingStrategy::UNIFORM:
evalVarPerJobsUniform();
break;
default:
assert(false);
break;
}
}
void MaxClauseJobChooser::evalVarPerJobsDistribute()
{
double expected = (n_threads * n_blocks) * JOBS_PER_THREAD;
double max = std::pow(2, std::min<size_t>(std::max<size_t>((n_working_vars - MIN_FREE_VARS), 1), 62));
#ifdef USE_ASSERTIONS
assert(max > 0);
#endif
expected = std::min(expected, max);
vars_per_job = static_cast<size_t>(std::log2(expected)) + 1 ;
vars_per_job = std::min<size_t>(vars_per_job, MAX_VARS);
}
void MaxClauseJobChooser::evalVarPerJobsUniform()
{
size_t expected = UNIFORM_NUMBER_OF_VARS;
size_t max = std::max<size_t>(n_working_vars - MIN_FREE_VARS, 1);
vars_per_job = std::min<size_t>(std::min<size_t>(expected, max), MAX_VARS);
}
|
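The MaxClauseJobChooser pair above enumerates 2^vars_per_job jobs by recursively fixing each chosen variable first to a positive and then to a negative literal (addJobs), which is why get_n_jobs returns 2 raised to vars_per_job. Below is a stand-alone sketch of that enumeration pattern; the names (enumerate, fixed) are illustrative and not from the dataset.

```cpp
#include <cstdio>
#include <vector>

// Stand-alone sketch of the recursion used by MaxClauseJobChooser::addJobs:
// fix each chosen variable to "true" and then to "false", producing all
// 2^k partial assignments (printed here; the real code wraps each one in a Job).
static void enumerate(std::vector<int>& fixed, size_t depth, size_t k)
{
    if (depth == k) {                          // one complete assignment
        for (int lit : fixed) std::printf("%+d ", lit);
        std::printf("\n");
        return;
    }
    for (int sign : {+1, -1}) {                // +1 ~ positive literal, -1 ~ negated
        fixed[depth] = sign * static_cast<int>(depth + 1);
        enumerate(fixed, depth + 1, k);
    }
}

int main()
{
    const size_t k = 3;                        // plays the role of vars_per_job
    std::vector<int> fixed(k);
    enumerate(fixed, 0, k);                    // prints 2^3 = 8 assignments
    return 0;
}
```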
989fb1196c285cf64f9d1f4e2e5fd5bbf87c38ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *inMatrix = NULL;
hipMalloc(&inMatrix, XSIZE*YSIZE);
float *outMatrix = NULL;
hipMalloc(&outMatrix, XSIZE*YSIZE);
const size_t row = XSIZE*YSIZE;
const size_t column = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
transGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, inMatrix,outMatrix,row,column);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
transGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, inMatrix,outMatrix,row,column);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
transGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, inMatrix,outMatrix,row,column);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
989fb1196c285cf64f9d1f4e2e5fd5bbf87c38ac.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *inMatrix = NULL;
cudaMalloc(&inMatrix, XSIZE*YSIZE);
float *outMatrix = NULL;
cudaMalloc(&outMatrix, XSIZE*YSIZE);
const size_t row = XSIZE*YSIZE;
const size_t column = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
transGPU<<<gridBlock,threadBlock>>>(inMatrix,outMatrix,row,column);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transGPU<<<gridBlock,threadBlock>>>(inMatrix,outMatrix,row,column);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transGPU<<<gridBlock,threadBlock>>>(inMatrix,outMatrix,row,column);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
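The benchmark pair above times 1000 kernel launches with std::chrono but never synchronizes the device before reading the end timestamp, so the measured interval mostly reflects launch overhead rather than kernel execution. A stand-alone sketch of the same timing loop with synchronization added follows; dummyKernel and its arguments are placeholders, not the dataset's transGPU.

```cpp
#include <chrono>
#include <cstdio>
#include <hip/hip_runtime.h>

// Placeholder kernel standing in for transGPU; only the timing pattern matters.
__global__ void dummyKernel(float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 2.0f * i;
}

int main()
{
    const int n = 1 << 20;
    float* d_out = nullptr;
    hipMalloc((void**)&d_out, n * sizeof(float));

    dim3 block(256), grid((n + block.x - 1) / block.x);
    hipLaunchKernelGGL(dummyKernel, grid, block, 0, 0, d_out, n);   // warm-up
    hipDeviceSynchronize();                    // drain warm-up work before timing

    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < 1000; ++i)
        hipLaunchKernelGGL(dummyKernel, grid, block, 0, 0, d_out, n);
    hipDeviceSynchronize();                    // include kernel execution, not just launch cost
    auto end = std::chrono::steady_clock::now();

    std::printf("%f us per launch\n",
                std::chrono::duration<float, std::micro>(end - start).count() / 1000.0f);
    hipFree(d_out);
    return 0;
}
```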
4a84a9783e138d5e7178a606fe95cf532c7fe7e9.hip
|
// !!! This is a file automatically generated by hipify!!!
// Author: Hengquan Zhang
// This file contains source code for PET image reconstruction from coincidence data in binary format.
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <stdlib.h>
#include <math.h>
#include <cmath>
#include <cstdlib>
#include <sstream>
#include <iomanip>
#include <limits>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include <sys/time.h>
using namespace std;
#include "global_var.h"
#include "func.h"
#include "projection.h"
#include "timing.h"
#include "lor.h"
#include "macro.h"
//////////////////////////////////////
// Main function.
//////////////////////////////////////
int main(int argc, char* argv[])
{
vector<string> vpara;
string line;
stringstream ss;
ifstream config ("configRecon.txt");
int itenum; //number of iterations.
///////////////////////////////////
// read "configRecon.txt" file.
///////////////////////////////////
if (config.is_open())
{
while ( getline (config,line) )
{
if(line.length() == 0) continue;
vpara=explode(line,' ');
if(vpara[0]=="FOV") {ss<<vpara[2];ss>>bndry[0];ss.clear();ss<<vpara[3];ss>>bndry[1];ss.clear();ss<<vpara[4];ss>>bndry[2];ss.clear();} else
if(vpara[0]=="GridSize") {ss<<vpara[2];ss>>a;ss.clear();} else
if(vpara[0]=="TorHalfWidth") {ss<<vpara[2];ss>>torhw;ss.clear();} else
if(vpara[0]=="TorSigma") {ss<<vpara[2];ss>>torsgm;ss.clear();} else
if(vpara[0]=="NumberOfIterations") {ss<<vpara[2];ss>>itenum;ss.clear();} else
if(vpara[0]=="Regularization") {ss<<vpara[2];ss>>rgl;ss.clear();} else
if(vpara[0]=="Normalization") {ss<<vpara[2];ss>>norma;ss.clear();} else
if(vpara[0]=="ThreshNorm") {ss<<vpara[2];ss>>ThreshNorm;ss.clear();} else
if(vpara[0]=="BetaR") {ss<<vpara[2];ss>>beta;ss.clear();} else
if(vpara[0]=="BlurR") {ss<<vpara[2];ss>>blur;ss.clear();} else
if(vpara[0]=="XsigmaRB") {ss<<vpara[2];ss>>bsgm[0];ss.clear();} else
if(vpara[0]=="YsigmaRB") {ss<<vpara[2];ss>>bsgm[1];ss.clear();} else
if(vpara[0]=="ZsigmaRB") {ss<<vpara[2];ss>>bsgm[2];ss.clear();} else
if(vpara[0]=="ImagePSF") {ss<<vpara[2];ss>>imagepsf;ss.clear();} else
if(vpara[0]=="PSFSigma") {
ss<<vpara[2];ss>>psfsgm[0];ss.clear();
ss<<vpara[3];ss>>psfsgm[1];ss.clear();
ss<<vpara[4];ss>>psfsgm[2];ss.clear();} else
if(vpara[0]=="MaskFile") {ss<<vpara[2];ss>>maskFileName;ss.clear();} else
if(vpara[0]=="VoxelSize") {
ss<<vpara[2];ss>>psfVoxelSize[0];ss.clear();
ss<<vpara[3];ss>>psfVoxelSize[1];ss.clear();
ss<<vpara[4];ss>>psfVoxelSize[2];ss.clear();} else
if(vpara[0]=="BlurParaFile") {ss<<vpara[2];ss>>blurParaFile;ss.clear();} else
if(vpara[0]=="BlurParaNum") {
for(int i=0; i<2; i++) {ss<<vpara[2+i];ss>>blurParaNum[i];ss.clear();}
} else
if(vpara[0]=="RBCubeSize" && blur == 3) {
for(int i=0; i<3; i++) {ss<<vpara[2+i];ss>>indr[i]; ss.clear();} // Initialize indr here!!
}
}
config.close();
}
else cout << "Unable to open config file"<<endl;
////////////////////////////////////////////////////////////////////////////
// print the values read from "configRecon.txt" file for correctness check.
////////////////////////////////////////////////////////////////////////////
cout<<"-------------------------------------------"<<endl;
cout<<"Input parameters:"<<endl;
cout<<"FOV: "<<bndry[0]<<" mm x "<<bndry[1]<<" mm x "<<bndry[2]<<" mm"<<endl;
cout<<"Grid size: "<<a<<" mm"<<endl;
cout<<"TOR half width: "<<torhw<<" mm"<<endl;
cout<<"TOR sigma: "<<torsgm<<" mm"<<endl;
cout<<"Number of iterations: "<<itenum<<endl;
cout<<"Normalization?: "<<norma<<endl;
if(norma != 0) cout<<"ThreshNorm: "<<ThreshNorm<<endl;
cout<<"Regularization?: "<<rgl<<endl;
cout<<"Use image-based PSF?: "<<imagepsf<<endl;
float *blurpara, *dev_blurpara, *dev_allweight; // Use when blur==3
if(rgl==1)
{
cout<<"Beta for regularization: "<<beta<<endl;
cout<<"Blur?: "<<blur<<endl;
if(blur==1)
{
cout<<"Xsigma for blur: "<<bsgm[0]<<" mm"<<endl;
cout<<"Ysigma for blur: "<<bsgm[1]<<" mm"<<endl;
cout<<"Zsigma for blur: "<<bsgm[2]<<" mm"<<endl;
for(int i=0; i<3; i++) {
rads[i] = bsgm[i] * sqrt(-2. * log (bthresh));
indr[i] = trunc(rads[i]/a);
if(bsgm[i] == 0) bsgm[i] = 1.0;
}
weight_scale_2 = total_weight(indr, bsgm);
}
else if(blur==3)
{
cout<<"Blur parameter file name: "<<blurParaFile <<endl;
cout<<"Blur parameter number: "<<blurParaNum[0] << " " << blurParaNum[1] <<endl;
cout<<"Blur cube size: "<<indr[0] << " " << indr[1] << " " << indr[2] << endl;
for(int i=0; i<3; i++) indr[i]=(indr[i]-1)/2;
int tparasize = blurParaNum[0] * blurParaNum[1];
readTxt<float> (blurParaFile, blurpara, tparasize);
cout<<"Blur parameters: ";
for(int i=0; i<tparasize; i++) cout<<blurpara[i]<<" ";
cout<<endl;
hipMalloc((void**) &dev_blurpara, tparasize*sizeof(float) );
hipMemcpy(dev_blurpara, blurpara, tparasize * sizeof(float), hipMemcpyHostToDevice);
}
}
else beta = 0.;
float *psfImage; // Use when imagepsf==2
if(imagepsf == 1) {
cout<<"PSFSigma: "<<psfsgm[0]<<" "<<psfsgm[1]<<" "<<psfsgm[2]<<" mm"<<endl;
for(int i=0; i<3; i++) {
psfrads[i] = psfsgm[i] * sqrt(-2. * log (bthresh));
psfindr[i] = trunc(psfrads[i]/a);
if(psfsgm[i] == 0) psfsgm[i] = 1.0;
}
weight_scale_1 = total_weight(psfindr, psfsgm);
}
else if(imagepsf == 2) {
cout<<"PSF File Name: "<<maskFileName << endl;
cout << "PSF Image Voxel Size: " << psfVoxelSize[0] << " " << psfVoxelSize[1] << " " << psfVoxelSize[2] << endl;
readBinary<float> (maskFileName, psfImage, psfVoxelSize[0] * psfVoxelSize[1] * psfVoxelSize[2]);
cout << "Finish reading PSF image." << endl;
weight_scale_1 = total_weight_image(psfImage, psfVoxelSize[0] * psfVoxelSize[1] * psfVoxelSize[2]);
}
cout<<"-------------------------------------------"<<endl;
///////////////////////////////////////////
// Initialization work.
///////////////////////////////////////////
// calculate number of voxels in each axis
msize = ceil(bndry[0] / a) * ceil(bndry[1] / a) *ceil( bndry[2] / a);
int nx = ceil(bndry[0] / a);
int ny = ceil(bndry[1] / a);
int nz = ceil( bndry[2] / a);
bndry[0] = nx * a; bndry[1] = ny * a; bndry[2] = nz * a;
cout<<"FOV: "<<bndry[0]<<" mm x "<<bndry[1]<<" mm x "<<bndry[2]<<" mm"<<endl;
cout << "Dimension of images: " << nx << " x " << ny << " x " << nz << endl;
//copy fundamental variables to cuda constant memory.
int *temp_imageindex;
temp_imageindex = (int*) malloc(4 * sizeof(int));
temp_imageindex[0] = nx;
temp_imageindex[1] = ny;
temp_imageindex[2] = nz;
temp_imageindex[3] = msize;
float *temp_info;
temp_info = (float*) malloc(4 * sizeof(float));
temp_info[0] = a;
temp_info[1] = torhw;
temp_info[2] = pow(torsgm,2);
temp_info[3] = beta;
hipMemcpyToSymbol(d_imageindex, temp_imageindex, 4 * sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_info, temp_info, 4 * sizeof(float), 0, hipMemcpyHostToDevice);
// define variables for image recon.
float *dev_smatrix, *dev_snmatrix, *dev_poimage, *dev_bmatrix;
float allave, *gave, *dev_gave, hostAvep;
timinggpu.StartCounter();
hipMalloc((void**) &dev_smatrix, msize*sizeof(float) );
hipMalloc((void**) &dev_snmatrix, msize*sizeof(float) );
smatrix = (float*) malloc(msize * sizeof(float));
snmatrix = (float*) malloc(msize * sizeof(float));
gave = (float*) malloc(blocksPerGrid * sizeof(float));
hipMalloc((void**) &dev_gave, blocksPerGrid*sizeof(float) );
timeall.memoryIO += timinggpu.GetCounter();
dim3 threads(threadsperBlock, threadsperBlock);
nummainaxis = (int*) malloc(3 * sizeof(int));
vector<double> logLikelihood, logR; //value of objective functions in all iterations
float *dev_blur_smatrix, *dev_blur_snmatrix; // For image-based PSF
if (imagepsf > 0) {
hipMalloc((void**) &dev_blur_smatrix, msize*sizeof(float) );
hipMalloc((void**) &dev_blur_snmatrix, msize*sizeof(float) );
}
if(rgl == 1) {
string rglname=argv[3];
readBinary<float> (rglname, poimage, msize); // read prior image into memory
timinggpu.StartCounter();
hipMalloc((void**) &dev_poimage, msize*sizeof(float) );
hipMalloc((void**) &dev_bmatrix, msize*sizeof(float) );
hipMemcpy( dev_poimage, poimage, msize*sizeof(float), hipMemcpyHostToDevice );
timeall.memoryIO += timinggpu.GetCounter();
// calculate average value of voxels in the prior image.
allave = 0.0;
hipLaunchKernelGGL(( calave), dim3(blocksPerGrid), dim3(reducsize), 0, 0, dev_poimage, dev_gave);
hipMemcpy(gave, dev_gave, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost);
for(int jj=0; jj< blocksPerGrid; jj++) allave += gave[jj];
allave /= msize;
hipMemcpyToSymbol(avep, &allave, sizeof(float), 0, hipMemcpyHostToDevice);
cout<<"Prior image average value: "<<allave<<endl;
hostAvep = allave; //for storing A_P
// Calculate dev_allweight
if (blur == 3) {
hipMalloc((void**) &dev_allweight, msize*sizeof(float));
hipMemset( dev_allweight, 0, msize*sizeof(float));
hipMemcpyToSymbol(d_indr, indr, 3 * sizeof(int), 0, hipMemcpyHostToDevice);
int blockWidth = 8;
dim3 dimBlock(blockWidth, blockWidth, blockWidth);
dim3 dimGrid(ceil(nx*1.0f/blockWidth), ceil(ny*1.0f/blockWidth), ceil(nz*1.0f/blockWidth));
hipLaunchKernelGGL(( total_weight_variant), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_allweight, dev_blurpara, blurParaNum[0], blurParaNum[1]);
}
}
// normalization
string filenorm=argv[4]; // name of the file that contains coincidence data for normalization
get_normal_image(filenorm, nx, ny, nz); // get values for normimage and dev_normimage if norma>0.
// Initialize image-based psf model for forward and backward projection. Use when imagepsf==2.
float *dev_psfImage;
if(imagepsf == 2) {
int totalSize = psfVoxelSize[0] * psfVoxelSize[1] * psfVoxelSize[2];
hipMalloc((void**) &dev_psfImage, totalSize*sizeof(float) );
hipMemcpy(dev_psfImage, psfImage, totalSize * sizeof(float), hipMemcpyHostToDevice);
}
// With image based PSF, normimage needs blurring
if(imagepsf > 0 && norma > 0) {
float *dev_bnorm;
hipMalloc((void**) &dev_bnorm, msize*sizeof(float) );
if (imagepsf == 1) blur_wrapper(dev_normimage, dev_bnorm, NULL, psfsgm, psfrads, psfindr, weight_scale_1);
else if (imagepsf == 2) blur_wrapper(dev_normimage, dev_bnorm, NULL, psfVoxelSize, dev_psfImage, weight_scale_1);
hipMemcpy(normimage, dev_bnorm, msize*sizeof(float), hipMemcpyDeviceToHost);
hipFree(dev_bnorm);
}
// If use normalization, scale normimage here.
if(norma > 0) {
float maxnorm = 0.0;
for(int i = 0; i < msize; i++)
if(maxnorm < normimage[i]) maxnorm = normimage[i];
for(int i = 0; i < msize; i++) {
float sen_temp = normimage[i] / maxnorm;
if(sen_temp < ThreshNorm) sen_temp = ThreshNorm;
normimage[i] = sen_temp;
}
hipMemcpy(dev_normimage, normimage, msize*sizeof(float), hipMemcpyHostToDevice);
}
// open file that contains input lors for image reconstruction. This should be after normalization file read.
string filein=argv[1];
cout<<"Sorting LORs and copying to device memory......"<<endl;
preplor(filein); // read lors in the file, sort lors, copy to cuda
cout<<"Finish sorting and copying."<<endl;
hipMemcpyToSymbol(d_lorindex, nummainaxis, 3 * sizeof(int), 0, hipMemcpyHostToDevice);
cout<<"Number of LORs in each main axis (x,y,x): "<<nummainaxis[0]<<" "<<nummainaxis[1]<<" "<<nummainaxis[2]<<endl;
//re-define beta, beta_new = beta * A / 2. Also initialize smatrix as an uniform image.
if(norma == 0) {
beta = beta * float(numline)/msize / 2.0f;
for(int i=0; i<msize; i++) smatrix[i] = float(numline)/msize;
}
else {
float sumNormimage = 0.0f;
for(int i=0; i< msize; i++) sumNormimage += normimage[i];
beta = beta * float(numline)/sumNormimage / 2.0f;
for(int i=0; i<msize; i++) smatrix[i] = float(numline)/sumNormimage;
}
temp_info[3] = beta;
hipMemcpyToSymbol(d_info, temp_info, 4 * sizeof(float), 0, hipMemcpyHostToDevice);
//calculate the average voxel value in the target image. In this case, it remains unchanged across iterations.
allave = smatrix[0];
hipMemcpyToSymbol(aves, &allave, sizeof(float), 0, hipMemcpyHostToDevice);
float hostAve = allave; //for storing the value of A.
///////////////////////////////////////////
//start iterations for image reconstruction
///////////////////////////////////////////
for(int ij=0; ij<itenum; ij++){
double templogLikelihood = 0.0;
double templogR = 0.0;
cout<<"Starting "<<ij<<" iteration."<<endl;
timinggpu.StartCounter();
hipMemcpy( dev_smatrix, smatrix, msize*sizeof(float), hipMemcpyHostToDevice );
hipMemset( dev_snmatrix, 0, msize*sizeof(float));
hipMemset( dev_xlor.linevalue, 0, nummainaxis[0]*sizeof(float));
hipMemset( dev_ylor.linevalue, 0, nummainaxis[1]*sizeof(float));
hipMemset( dev_zlor.linevalue, 0, nummainaxis[2]*sizeof(float));
timeall.memoryIO += timinggpu.GetCounter();
// Forward and backward projection
if(imagepsf == 1) {
blur_wrapper(dev_smatrix, dev_blur_smatrix, NULL, psfsgm, psfrads, psfindr, weight_scale_1);
proj_wrapper(dev_xlor, dev_ylor, dev_zlor, dev_blur_smatrix, dev_snmatrix);
blur_wrapper(dev_snmatrix, dev_blur_snmatrix, NULL, psfsgm, psfrads, psfindr, weight_scale_1);
hipMemcpy( dev_snmatrix, dev_blur_snmatrix, msize*sizeof(float), hipMemcpyDeviceToDevice );
}
else if(imagepsf == 2) {
blur_wrapper(dev_smatrix, dev_blur_smatrix, NULL, psfVoxelSize, dev_psfImage, weight_scale_1);
proj_wrapper(dev_xlor, dev_ylor, dev_zlor, dev_blur_smatrix, dev_snmatrix);
blur_wrapper(dev_snmatrix, dev_blur_snmatrix, NULL, psfVoxelSize, dev_psfImage, weight_scale_1);
hipMemcpy( dev_snmatrix, dev_blur_snmatrix, msize*sizeof(float), hipMemcpyDeviceToDevice );
}
else if(imagepsf == 0) proj_wrapper(dev_xlor, dev_ylor, dev_zlor, dev_smatrix, dev_snmatrix);
else cout << "Unknown identifier for imagepsf!!" << endl;
// Post processing
if(rgl == 0)
{
if(norma == 0){
timinggpu.StartCounter();
hipLaunchKernelGGL(( calnewmatrix000), dim3(blocksPerGrid), dim3(threads), 0, 0, dev_snmatrix, dev_smatrix);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
else{
timinggpu.StartCounter();
hipLaunchKernelGGL(( calnewmatrix100), dim3(blocksPerGrid), dim3(threads), 0, 0, dev_snmatrix, dev_smatrix, dev_normimage);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
}
else if(rgl == 1 && blur == 0)
{
if(norma == 0){
timinggpu.StartCounter();
hipLaunchKernelGGL(( calnewmatrix010), dim3(blocksPerGrid), dim3(threads), 0, 0, dev_snmatrix, dev_smatrix, dev_poimage);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
else{
timinggpu.StartCounter();
hipLaunchKernelGGL(( calnewmatrix110), dim3(blocksPerGrid), dim3(threads), 0, 0, dev_snmatrix, dev_smatrix, dev_normimage, dev_poimage);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
}
else if(rgl == 1 && blur > 0)
{
//calculate the voxel values in blurred target image
hipMemset( dev_bmatrix, 0, msize*sizeof(float));
if(blur == 1) blur_wrapper( dev_smatrix, dev_bmatrix, NULL, bsgm, rads, indr, weight_scale_2);
else if(blur == 3) blur_wrapper(dev_smatrix, dev_bmatrix, dev_allweight, indr, dev_blurpara, blurParaNum[0], blurParaNum[1]) ;
//calculate new image for this iteration
if(norma == 0){
timinggpu.StartCounter();
if(blur==1)hipLaunchKernelGGL(( calnewmatrix011<Blur_Gaussian_Invariant>), dim3(blocksPerGrid), dim3(threads), 0, 0, dev_snmatrix, dev_smatrix, dev_poimage, dev_bmatrix, NULL, weight_scale_2, NULL, NULL, 0, 0);
else if(blur==3)hipLaunchKernelGGL(( calnewmatrix011<Blur_Gaussian_Variant>), dim3(blocksPerGrid), dim3(threads), 0, 0, dev_snmatrix, dev_smatrix, dev_poimage, dev_bmatrix, dev_allweight, 0, NULL, dev_blurpara, blurParaNum[0], blurParaNum[1]);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
else{
timinggpu.StartCounter();
if(blur==1)hipLaunchKernelGGL(( calnewmatrix111<Blur_Gaussian_Invariant>), dim3(blocksPerGrid), dim3(threads), 0, 0, dev_snmatrix, dev_smatrix, dev_normimage, dev_poimage, dev_bmatrix, NULL, weight_scale_2, NULL, NULL, 0, 0);
else if(blur==3)hipLaunchKernelGGL(( calnewmatrix111<Blur_Gaussian_Variant>), dim3(blocksPerGrid), dim3(threads), 0, 0, dev_snmatrix, dev_smatrix, dev_normimage, dev_poimage, dev_bmatrix, dev_allweight, 0, NULL, dev_blurpara, blurParaNum[0], blurParaNum[1]);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
}
else cout<<"Unknown indentifier for regularization or blur!!!"<<endl;
// Copy new image from device to host memory.
timinggpu.StartCounter();
hipMemcpy(snmatrix, dev_snmatrix, msize*sizeof(float), hipMemcpyDeviceToHost);
timeall.memoryIO += timinggpu.GetCounter();
cout<<"Finish "<<ij<<" iteration."<<endl;
//write new image to file.
ostringstream convert;
convert<<(ij+1);
string fileout=argv[2];
fileout.append(convert.str());
writeBinary<float> (fileout, snmatrix, msize);
//calculate objective function values
double *gloglike, *dev_gloglike;
gloglike = (double*) malloc(blocksPerGrid * sizeof(double));
hipMalloc((void**) &dev_gloglike, blocksPerGrid*sizeof(double) );
hipLaunchKernelGGL(( calLogLike), dim3(blocksPerGrid), dim3(reducsize), 0, 0, dev_xlor.linevalue, dev_gloglike, nummainaxis[0]);
hipMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), hipMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogLikelihood += gloglike[iobj];
hipLaunchKernelGGL(( calLogLike), dim3(blocksPerGrid), dim3(reducsize), 0, 0, dev_ylor.linevalue, dev_gloglike, nummainaxis[1]);
hipMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), hipMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogLikelihood += gloglike[iobj];
hipLaunchKernelGGL(( calLogLike), dim3(blocksPerGrid), dim3(reducsize), 0, 0, dev_zlor.linevalue, dev_gloglike, nummainaxis[2]);
hipMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), hipMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogLikelihood += gloglike[iobj];
hipLaunchKernelGGL(( calLogLikeS), dim3(blocksPerGrid), dim3(reducsize), 0, 0, dev_smatrix, dev_normimage, dev_gloglike, msize, norma);
hipMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), hipMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogLikelihood += gloglike[iobj];
if(rgl == 1 && blur == 0)hipLaunchKernelGGL(( calLogR), dim3(blocksPerGrid), dim3(reducsize), 0, 0, dev_smatrix, dev_poimage, dev_gloglike, msize);
else if(rgl == 1 && blur > 0)hipLaunchKernelGGL(( calLogR), dim3(blocksPerGrid), dim3(reducsize), 0, 0, dev_bmatrix, dev_poimage, dev_gloglike, msize);
if(rgl == 1) {
hipMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), hipMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogR += gloglike[iobj];
}
templogR *= beta;
logLikelihood.push_back(templogLikelihood);
logR.push_back(templogR);
//prepare for next iteration
for(int iii=0; iii< msize; iii++)
{
smatrix[iii] = snmatrix[iii];
snmatrix[iii] = 0.;
}
}
/////////////////////////////////////////////////////
// save the value of objective function to file.
/////////////////////////////////////////////////////
ofstream fObjFunc ("ObjectiveFuncValue.txt");
if(fObjFunc.is_open()){
for (int i=0; i< itenum; i++) fObjFunc << i << " "<< logLikelihood[i] << " " << logR[i] << " " << logLikelihood[i] + logR[i] << endl;
}
else cout<< "Can not open ObjectiveFuncValue.txt!!" <<endl;
fObjFunc.close();
timeall.printvalue(); //print out timing information about cuda execution.
///////////////////////////////////////////////////////////
// Free dynamically allocated memory and GPU global memory.
///////////////////////////////////////////////////////////
hipFree(dev_xlor.x1); hipFree(dev_xlor.y1); hipFree(dev_xlor.z1); hipFree(dev_xlor.x2); hipFree(dev_xlor.y2); hipFree(dev_xlor.z2);
hipFree(dev_ylor.x1); hipFree(dev_ylor.y1); hipFree(dev_ylor.z1); hipFree(dev_ylor.x2); hipFree(dev_ylor.y2); hipFree(dev_ylor.z2);
hipFree(dev_zlor.x1); hipFree(dev_zlor.y1); hipFree(dev_zlor.z1); hipFree(dev_zlor.x2); hipFree(dev_zlor.y2); hipFree(dev_zlor.z2);
hipFree(dev_xlor.linevalue); hipFree(dev_ylor.linevalue); hipFree(dev_zlor.linevalue);
hipFree(dev_smatrix);
hipFree(dev_snmatrix);
free(nummainaxis);
free(temp_imageindex);
free(temp_info);
free(xlor.x1); free(xlor.y1); free(xlor.z1); free(xlor.x2); free(xlor.y2); free(xlor.z2);
free(ylor.x1); free(ylor.y1); free(ylor.z1); free(ylor.x2); free(ylor.y2); free(ylor.z2);
free(zlor.x1); free(zlor.y1); free(zlor.z1); free(zlor.x2); free(zlor.y2); free(zlor.z2);
free(xlor.linevalue); free(ylor.linevalue); free(zlor.linevalue);
free(smatrix);
free(snmatrix);
if(norma > 0) {free(normimage); hipFree(dev_normimage);}
if(imagepsf == 2) {free(psfImage); hipFree(dev_psfImage);}
if(imagepsf > 0) {hipFree(dev_blur_smatrix); hipFree(dev_blur_snmatrix);}
if(rgl == 1 && blur == 3) {free(blurpara); hipFree(dev_allweight); hipFree(dev_blurpara);}
if(rgl == 1) {free(poimage); hipFree(dev_poimage); hipFree(dev_bmatrix);}
return 0;
}
|
4a84a9783e138d5e7178a606fe95cf532c7fe7e9.cu
|
// Author: Hengquan Zhang
// This file contains source code for PET image reconstruction from coincidence data in binary format.
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <stdlib.h>
#include <math.h>
#include <cmath>
#include <cstdlib>
#include <sstream>
#include <iomanip>
#include <limits>
#include <cuda_runtime.h>
#include "device_functions.h"
#include <sys/time.h>
using namespace std;
#include "global_var.h"
#include "func.h"
#include "projection.h"
#include "timing.h"
#include "lor.h"
#include "macro.h"
//////////////////////////////////////
// Main function.
//////////////////////////////////////
int main(int argc, char* argv[])
{
vector<string> vpara;
string line;
stringstream ss;
ifstream config ("configRecon.txt");
int itenum; //number of iterations.
///////////////////////////////////
// read "configRecon.txt" file.
///////////////////////////////////
if (config.is_open())
{
while ( getline (config,line) )
{
if(line.length() == 0) continue;
vpara=explode(line,' ');
if(vpara[0]=="FOV") {ss<<vpara[2];ss>>bndry[0];ss.clear();ss<<vpara[3];ss>>bndry[1];ss.clear();ss<<vpara[4];ss>>bndry[2];ss.clear();} else
if(vpara[0]=="GridSize") {ss<<vpara[2];ss>>a;ss.clear();} else
if(vpara[0]=="TorHalfWidth") {ss<<vpara[2];ss>>torhw;ss.clear();} else
if(vpara[0]=="TorSigma") {ss<<vpara[2];ss>>torsgm;ss.clear();} else
if(vpara[0]=="NumberOfIterations") {ss<<vpara[2];ss>>itenum;ss.clear();} else
if(vpara[0]=="Regularization") {ss<<vpara[2];ss>>rgl;ss.clear();} else
if(vpara[0]=="Normalization") {ss<<vpara[2];ss>>norma;ss.clear();} else
if(vpara[0]=="ThreshNorm") {ss<<vpara[2];ss>>ThreshNorm;ss.clear();} else
if(vpara[0]=="BetaR") {ss<<vpara[2];ss>>beta;ss.clear();} else
if(vpara[0]=="BlurR") {ss<<vpara[2];ss>>blur;ss.clear();} else
if(vpara[0]=="XsigmaRB") {ss<<vpara[2];ss>>bsgm[0];ss.clear();} else
if(vpara[0]=="YsigmaRB") {ss<<vpara[2];ss>>bsgm[1];ss.clear();} else
if(vpara[0]=="ZsigmaRB") {ss<<vpara[2];ss>>bsgm[2];ss.clear();} else
if(vpara[0]=="ImagePSF") {ss<<vpara[2];ss>>imagepsf;ss.clear();} else
if(vpara[0]=="PSFSigma") {
ss<<vpara[2];ss>>psfsgm[0];ss.clear();
ss<<vpara[3];ss>>psfsgm[1];ss.clear();
ss<<vpara[4];ss>>psfsgm[2];ss.clear();} else
if(vpara[0]=="MaskFile") {ss<<vpara[2];ss>>maskFileName;ss.clear();} else
if(vpara[0]=="VoxelSize") {
ss<<vpara[2];ss>>psfVoxelSize[0];ss.clear();
ss<<vpara[3];ss>>psfVoxelSize[1];ss.clear();
ss<<vpara[4];ss>>psfVoxelSize[2];ss.clear();} else
if(vpara[0]=="BlurParaFile") {ss<<vpara[2];ss>>blurParaFile;ss.clear();} else
if(vpara[0]=="BlurParaNum") {
for(int i=0; i<2; i++) {ss<<vpara[2+i];ss>>blurParaNum[i];ss.clear();}
} else
if(vpara[0]=="RBCubeSize" && blur == 3) {
for(int i=0; i<3; i++) {ss<<vpara[2+i];ss>>indr[i]; ss.clear();} // Initialize indr here!!
}
}
config.close();
}
else cout << "Unable to open config file"<<endl;
////////////////////////////////////////////////////////////////////////////
// print the values read from "configRecon.txt" file for correctness check.
////////////////////////////////////////////////////////////////////////////
cout<<"-------------------------------------------"<<endl;
cout<<"Input parameters:"<<endl;
cout<<"FOV: "<<bndry[0]<<" mm x "<<bndry[1]<<" mm x "<<bndry[2]<<" mm"<<endl;
cout<<"Grid size: "<<a<<" mm"<<endl;
cout<<"TOR half width: "<<torhw<<" mm"<<endl;
cout<<"TOR sigma: "<<torsgm<<" mm"<<endl;
cout<<"Number of iterations: "<<itenum<<endl;
cout<<"Normalization?: "<<norma<<endl;
if(norma != 0) cout<<"ThreshNorm: "<<ThreshNorm<<endl;
cout<<"Regularization?: "<<rgl<<endl;
cout<<"Use image-based PSF?: "<<imagepsf<<endl;
float *blurpara, *dev_blurpara, *dev_allweight; // Use when blur==3
if(rgl==1)
{
cout<<"Beta for regularization: "<<beta<<endl;
cout<<"Blur?: "<<blur<<endl;
if(blur==1)
{
cout<<"Xsigma for blur: "<<bsgm[0]<<" mm"<<endl;
cout<<"Ysigma for blur: "<<bsgm[1]<<" mm"<<endl;
cout<<"Zsigma for blur: "<<bsgm[2]<<" mm"<<endl;
for(int i=0; i<3; i++) {
rads[i] = bsgm[i] * sqrt(-2. * log (bthresh));
indr[i] = trunc(rads[i]/a);
if(bsgm[i] == 0) bsgm[i] = 1.0;
}
weight_scale_2 = total_weight(indr, bsgm);
}
else if(blur==3)
{
cout<<"Blur parameter file name: "<<blurParaFile <<endl;
cout<<"Blur parameter number: "<<blurParaNum[0] << " " << blurParaNum[1] <<endl;
cout<<"Blur cube size: "<<indr[0] << " " << indr[1] << " " << indr[2] << endl;
for(int i=0; i<3; i++) indr[i]=(indr[i]-1)/2;
int tparasize = blurParaNum[0] * blurParaNum[1];
readTxt<float> (blurParaFile, blurpara, tparasize);
cout<<"Blur parameters: ";
for(int i=0; i<tparasize; i++) cout<<blurpara[i]<<" ";
cout<<endl;
cudaMalloc((void**) &dev_blurpara, tparasize*sizeof(float) );
cudaMemcpy(dev_blurpara, blurpara, tparasize * sizeof(float), cudaMemcpyHostToDevice);
}
}
else beta = 0.;
float *psfImage; // Use when imagepsf==2
if(imagepsf == 1) {
cout<<"PSFSigma: "<<psfsgm[0]<<" "<<psfsgm[1]<<" "<<psfsgm[2]<<" mm"<<endl;
for(int i=0; i<3; i++) {
psfrads[i] = psfsgm[i] * sqrt(-2. * log (bthresh));
psfindr[i] = trunc(psfrads[i]/a);
if(psfsgm[i] == 0) psfsgm[i] = 1.0;
}
weight_scale_1 = total_weight(psfindr, psfsgm);
}
else if(imagepsf == 2) {
cout<<"PSF File Name: "<<maskFileName << endl;
cout << "PSF Image Voxel Size: " << psfVoxelSize[0] << " " << psfVoxelSize[1] << " " << psfVoxelSize[2] << endl;
readBinary<float> (maskFileName, psfImage, psfVoxelSize[0] * psfVoxelSize[1] * psfVoxelSize[2]);
cout << "Finish reading PSF image." << endl;
weight_scale_1 = total_weight_image(psfImage, psfVoxelSize[0] * psfVoxelSize[1] * psfVoxelSize[2]);
}
cout<<"-------------------------------------------"<<endl;
///////////////////////////////////////////
// Initialization work.
///////////////////////////////////////////
// calculate number of voxels in each axis
msize = ceil(bndry[0] / a) * ceil(bndry[1] / a) *ceil( bndry[2] / a);
int nx = ceil(bndry[0] / a);
int ny = ceil(bndry[1] / a);
int nz = ceil( bndry[2] / a);
bndry[0] = nx * a; bndry[1] = ny * a; bndry[2] = nz * a;
cout<<"FOV: "<<bndry[0]<<" mm x "<<bndry[1]<<" mm x "<<bndry[2]<<" mm"<<endl;
cout << "Dimension of images: " << nx << " x " << ny << " x " << nz << endl;
//copy fundamental variables to cuda constant memory.
int *temp_imageindex;
temp_imageindex = (int*) malloc(4 * sizeof(int));
temp_imageindex[0] = nx;
temp_imageindex[1] = ny;
temp_imageindex[2] = nz;
temp_imageindex[3] = msize;
float *temp_info;
temp_info = (float*) malloc(4 * sizeof(float));
temp_info[0] = a;
temp_info[1] = torhw;
temp_info[2] = pow(torsgm,2);
temp_info[3] = beta;
cudaMemcpyToSymbol(d_imageindex, temp_imageindex, 4 * sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_info, temp_info, 4 * sizeof(float), 0, cudaMemcpyHostToDevice);
// define variables for image recon.
float *dev_smatrix, *dev_snmatrix, *dev_poimage, *dev_bmatrix;
float allave, *gave, *dev_gave, hostAvep;
timinggpu.StartCounter();
cudaMalloc((void**) &dev_smatrix, msize*sizeof(float) );
cudaMalloc((void**) &dev_snmatrix, msize*sizeof(float) );
smatrix = (float*) malloc(msize * sizeof(float));
snmatrix = (float*) malloc(msize * sizeof(float));
gave = (float*) malloc(blocksPerGrid * sizeof(float));
cudaMalloc((void**) &dev_gave, blocksPerGrid*sizeof(float) );
timeall.memoryIO += timinggpu.GetCounter();
dim3 threads(threadsperBlock, threadsperBlock);
nummainaxis = (int*) malloc(3 * sizeof(int));
vector<double> logLikelihood, logR; //value of objective functions in all iterations
float *dev_blur_smatrix, *dev_blur_snmatrix; // For image-based PSF
if (imagepsf > 0) {
cudaMalloc((void**) &dev_blur_smatrix, msize*sizeof(float) );
cudaMalloc((void**) &dev_blur_snmatrix, msize*sizeof(float) );
}
if(rgl == 1) {
string rglname=argv[3];
readBinary<float> (rglname, poimage, msize); // read prior image into memory
timinggpu.StartCounter();
cudaMalloc((void**) &dev_poimage, msize*sizeof(float) );
cudaMalloc((void**) &dev_bmatrix, msize*sizeof(float) );
cudaMemcpy( dev_poimage, poimage, msize*sizeof(float), cudaMemcpyHostToDevice );
timeall.memoryIO += timinggpu.GetCounter();
// calculate average value of voxels in the prior image.
allave = 0.0;
calave<<<blocksPerGrid, reducsize>>>(dev_poimage, dev_gave);
cudaMemcpy(gave, dev_gave, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost);
for(int jj=0; jj< blocksPerGrid; jj++) allave += gave[jj];
allave /= msize;
cudaMemcpyToSymbol(avep, &allave, sizeof(float), 0, cudaMemcpyHostToDevice);
cout<<"Prior image average value: "<<allave<<endl;
hostAvep = allave; //for storing A_P
// Calculate dev_allweight
if (blur == 3) {
cudaMalloc((void**) &dev_allweight, msize*sizeof(float));
cudaMemset( dev_allweight, 0, msize*sizeof(float));
cudaMemcpyToSymbol(d_indr, indr, 3 * sizeof(int), 0, cudaMemcpyHostToDevice);
int blockWidth = 8;
dim3 dimBlock(blockWidth, blockWidth, blockWidth);
dim3 dimGrid(ceil(nx*1.0f/blockWidth), ceil(ny*1.0f/blockWidth), ceil(nz*1.0f/blockWidth));
total_weight_variant<<<dimGrid, dimBlock>>>(dev_allweight, dev_blurpara, blurParaNum[0], blurParaNum[1]);
}
}
// normalization
string filenorm=argv[4]; // name of the file that contains coincidence data for normalization
get_normal_image(filenorm, nx, ny, nz); // get values for normimage and dev_normimage if norma>0.
// Initialize image-based psf model for forward and backward projection. Use when imagepsf==2.
float *dev_psfImage;
if(imagepsf == 2) {
int totalSize = psfVoxelSize[0] * psfVoxelSize[1] * psfVoxelSize[2];
cudaMalloc((void**) &dev_psfImage, totalSize*sizeof(float) );
cudaMemcpy(dev_psfImage, psfImage, totalSize * sizeof(float), cudaMemcpyHostToDevice);
}
// With image based PSF, normimage needs blurring
if(imagepsf > 0 && norma > 0) {
float *dev_bnorm;
cudaMalloc((void**) &dev_bnorm, msize*sizeof(float) );
if (imagepsf == 1) blur_wrapper(dev_normimage, dev_bnorm, NULL, psfsgm, psfrads, psfindr, weight_scale_1);
else if (imagepsf == 2) blur_wrapper(dev_normimage, dev_bnorm, NULL, psfVoxelSize, dev_psfImage, weight_scale_1);
cudaMemcpy(normimage, dev_bnorm, msize*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_bnorm);
}
// If use normalization, scale normimage here.
if(norma > 0) {
float maxnorm = 0.0;
for(int i = 0; i < msize; i++)
if(maxnorm < normimage[i]) maxnorm = normimage[i];
for(int i = 0; i < msize; i++) {
float sen_temp = normimage[i] / maxnorm;
if(sen_temp < ThreshNorm) sen_temp = ThreshNorm;
normimage[i] = sen_temp;
}
cudaMemcpy(dev_normimage, normimage, msize*sizeof(float), cudaMemcpyHostToDevice);
}
// open file that contains input lors for image reconstruction. This should be after normalization file read.
string filein=argv[1];
cout<<"Sorting LORs and copying to device memory......"<<endl;
preplor(filein); // read lors in the file, sort lors, copy to cuda
cout<<"Finish sorting and copying."<<endl;
cudaMemcpyToSymbol(d_lorindex, nummainaxis, 3 * sizeof(int), 0, cudaMemcpyHostToDevice);
cout<<"Number of LORs in each main axis (x,y,x): "<<nummainaxis[0]<<" "<<nummainaxis[1]<<" "<<nummainaxis[2]<<endl;
//re-define beta, beta_new = beta * A / 2. Also initialize smatrix as an uniform image.
if(norma == 0) {
beta = beta * float(numline)/msize / 2.0f;
for(int i=0; i<msize; i++) smatrix[i] = float(numline)/msize;
}
else {
float sumNormimage = 0.0f;
for(int i=0; i< msize; i++) sumNormimage += normimage[i];
beta = beta * float(numline)/sumNormimage / 2.0f;
for(int i=0; i<msize; i++) smatrix[i] = float(numline)/sumNormimage;
}
temp_info[3] = beta;
cudaMemcpyToSymbol(d_info, temp_info, 4 * sizeof(float), 0, cudaMemcpyHostToDevice);
//calculate the average voxel value in the target image. In this case, it remains unchanged across iterations.
allave = smatrix[0];
cudaMemcpyToSymbol(aves, &allave, sizeof(float), 0, cudaMemcpyHostToDevice);
float hostAve = allave; //for storing the value of A.
///////////////////////////////////////////
//start iterations for image reconstruction
///////////////////////////////////////////
for(int ij=0; ij<itenum; ij++){
double templogLikelihood = 0.0;
double templogR = 0.0;
cout<<"Starting "<<ij<<" iteration."<<endl;
timinggpu.StartCounter();
cudaMemcpy( dev_smatrix, smatrix, msize*sizeof(float), cudaMemcpyHostToDevice );
cudaMemset( dev_snmatrix, 0, msize*sizeof(float));
cudaMemset( dev_xlor.linevalue, 0, nummainaxis[0]*sizeof(float));
cudaMemset( dev_ylor.linevalue, 0, nummainaxis[1]*sizeof(float));
cudaMemset( dev_zlor.linevalue, 0, nummainaxis[2]*sizeof(float));
timeall.memoryIO += timinggpu.GetCounter();
// Forward and backward projection
if(imagepsf == 1) {
blur_wrapper(dev_smatrix, dev_blur_smatrix, NULL, psfsgm, psfrads, psfindr, weight_scale_1);
proj_wrapper(dev_xlor, dev_ylor, dev_zlor, dev_blur_smatrix, dev_snmatrix);
blur_wrapper(dev_snmatrix, dev_blur_snmatrix, NULL, psfsgm, psfrads, psfindr, weight_scale_1);
cudaMemcpy( dev_snmatrix, dev_blur_snmatrix, msize*sizeof(float), cudaMemcpyDeviceToDevice );
}
else if(imagepsf == 2) {
blur_wrapper(dev_smatrix, dev_blur_smatrix, NULL, psfVoxelSize, dev_psfImage, weight_scale_1);
proj_wrapper(dev_xlor, dev_ylor, dev_zlor, dev_blur_smatrix, dev_snmatrix);
blur_wrapper(dev_snmatrix, dev_blur_snmatrix, NULL, psfVoxelSize, dev_psfImage, weight_scale_1);
cudaMemcpy( dev_snmatrix, dev_blur_snmatrix, msize*sizeof(float), cudaMemcpyDeviceToDevice );
}
else if(imagepsf == 0) proj_wrapper(dev_xlor, dev_ylor, dev_zlor, dev_smatrix, dev_snmatrix);
else cout << "Unknown identifier for imagepsf!!" << endl;
// Post processing
if(rgl == 0)
{
if(norma == 0){
timinggpu.StartCounter();
calnewmatrix000<<<blocksPerGrid, threads>>>(dev_snmatrix, dev_smatrix);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
else{
timinggpu.StartCounter();
calnewmatrix100<<<blocksPerGrid, threads>>>(dev_snmatrix, dev_smatrix, dev_normimage);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
}
else if(rgl == 1 && blur == 0)
{
if(norma == 0){
timinggpu.StartCounter();
calnewmatrix010<<<blocksPerGrid, threads>>>(dev_snmatrix, dev_smatrix, dev_poimage);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
else{
timinggpu.StartCounter();
calnewmatrix110<<<blocksPerGrid, threads>>>(dev_snmatrix, dev_smatrix, dev_normimage, dev_poimage);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
}
else if(rgl == 1 && blur > 0)
{
//calculate the voxel values in blurred target image
cudaMemset( dev_bmatrix, 0, msize*sizeof(float));
if(blur == 1) blur_wrapper( dev_smatrix, dev_bmatrix, NULL, bsgm, rads, indr, weight_scale_2);
else if(blur == 3) blur_wrapper(dev_smatrix, dev_bmatrix, dev_allweight, indr, dev_blurpara, blurParaNum[0], blurParaNum[1]) ;
//calculate new image for this iteration
if(norma == 0){
timinggpu.StartCounter();
if(blur==1) calnewmatrix011<Blur_Gaussian_Invariant><<<blocksPerGrid, threads>>>(dev_snmatrix, dev_smatrix, dev_poimage, dev_bmatrix, NULL, weight_scale_2, NULL, NULL, 0, 0);
else if(blur==3) calnewmatrix011<Blur_Gaussian_Variant><<<blocksPerGrid, threads>>>(dev_snmatrix, dev_smatrix, dev_poimage, dev_bmatrix, dev_allweight, 0, NULL, dev_blurpara, blurParaNum[0], blurParaNum[1]);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
else{
timinggpu.StartCounter();
if(blur==1) calnewmatrix111<Blur_Gaussian_Invariant><<<blocksPerGrid, threads>>>(dev_snmatrix, dev_smatrix, dev_normimage, dev_poimage, dev_bmatrix, NULL, weight_scale_2, NULL, NULL, 0, 0);
else if(blur==3) calnewmatrix111<Blur_Gaussian_Variant><<<blocksPerGrid, threads>>>(dev_snmatrix, dev_smatrix, dev_normimage, dev_poimage, dev_bmatrix, dev_allweight, 0, NULL, dev_blurpara, blurParaNum[0], blurParaNum[1]);
timeall.tpostimageprocess += timinggpu.GetCounter();
}
}
else cout<<"Unknown indentifier for regularization or blur!!!"<<endl;
// Copy new image from device to host memory.
timinggpu.StartCounter();
cudaMemcpy(snmatrix, dev_snmatrix, msize*sizeof(float), cudaMemcpyDeviceToHost);
timeall.memoryIO += timinggpu.GetCounter();
cout<<"Finish "<<ij<<" iteration."<<endl;
//write new image to file.
ostringstream convert;
convert<<(ij+1);
string fileout=argv[2];
fileout.append(convert.str());
writeBinary<float> (fileout, snmatrix, msize);
//calculate objective function values
double *gloglike, *dev_gloglike;
gloglike = (double*) malloc(blocksPerGrid * sizeof(double));
cudaMalloc((void**) &dev_gloglike, blocksPerGrid*sizeof(double) );
calLogLike<<<blocksPerGrid, reducsize>>>(dev_xlor.linevalue, dev_gloglike, nummainaxis[0]);
cudaMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), cudaMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogLikelihood += gloglike[iobj];
calLogLike<<<blocksPerGrid, reducsize>>>(dev_ylor.linevalue, dev_gloglike, nummainaxis[1]);
cudaMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), cudaMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogLikelihood += gloglike[iobj];
calLogLike<<<blocksPerGrid, reducsize>>>(dev_zlor.linevalue, dev_gloglike, nummainaxis[2]);
cudaMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), cudaMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogLikelihood += gloglike[iobj];
calLogLikeS<<<blocksPerGrid, reducsize>>>(dev_smatrix, dev_normimage, dev_gloglike, msize, norma);
cudaMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), cudaMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogLikelihood += gloglike[iobj];
if(rgl == 1 && blur == 0) calLogR<<<blocksPerGrid, reducsize>>>(dev_smatrix, dev_poimage, dev_gloglike, msize);
else if(rgl == 1 && blur > 0) calLogR<<<blocksPerGrid, reducsize>>>(dev_bmatrix, dev_poimage, dev_gloglike, msize);
if(rgl == 1) {
cudaMemcpy(gloglike, dev_gloglike, blocksPerGrid*sizeof(double), cudaMemcpyDeviceToHost);
for(int iobj = 0; iobj < blocksPerGrid; iobj++) templogR += gloglike[iobj];
}
templogR *= beta;
logLikelihood.push_back(templogLikelihood);
logR.push_back(templogR);
//prepare for next iteration
for(int iii=0; iii< msize; iii++)
{
smatrix[iii] = snmatrix[iii];
snmatrix[iii] = 0.;
}
}
/////////////////////////////////////////////////////
// save the value of objective function to file.
/////////////////////////////////////////////////////
ofstream fObjFunc ("ObjectiveFuncValue.txt");
if(fObjFunc.is_open()){
for (int i=0; i< itenum; i++) fObjFunc << i << " "<< logLikelihood[i] << " " << logR[i] << " " << logLikelihood[i] + logR[i] << endl;
}
else cout<< "Can not open ObjectiveFuncValue.txt!!" <<endl;
fObjFunc.close();
timeall.printvalue(); //print out timing information about cuda execution.
///////////////////////////////////////////////////////////
// Free dynamically allocated memory and GPU global memory.
///////////////////////////////////////////////////////////
cudaFree(dev_xlor.x1); cudaFree(dev_xlor.y1); cudaFree(dev_xlor.z1); cudaFree(dev_xlor.x2); cudaFree(dev_xlor.y2); cudaFree(dev_xlor.z2);
cudaFree(dev_ylor.x1); cudaFree(dev_ylor.y1); cudaFree(dev_ylor.z1); cudaFree(dev_ylor.x2); cudaFree(dev_ylor.y2); cudaFree(dev_ylor.z2);
cudaFree(dev_zlor.x1); cudaFree(dev_zlor.y1); cudaFree(dev_zlor.z1); cudaFree(dev_zlor.x2); cudaFree(dev_zlor.y2); cudaFree(dev_zlor.z2);
cudaFree(dev_xlor.linevalue); cudaFree(dev_ylor.linevalue); cudaFree(dev_zlor.linevalue);
cudaFree(dev_smatrix);
cudaFree(dev_snmatrix);
free(nummainaxis);
free(temp_imageindex);
free(temp_info);
free(xlor.x1); free(xlor.y1); free(xlor.z1); free(xlor.x2); free(xlor.y2); free(xlor.z2);
free(ylor.x1); free(ylor.y1); free(ylor.z1); free(ylor.x2); free(ylor.y2); free(ylor.z2);
free(zlor.x1); free(zlor.y1); free(zlor.z1); free(zlor.x2); free(zlor.y2); free(zlor.z2);
free(xlor.linevalue); free(ylor.linevalue); free(zlor.linevalue);
free(smatrix);
free(snmatrix);
if(norma > 0) {free(normimage); cudaFree(dev_normimage);}
if(imagepsf == 2) {free(psfImage); cudaFree(dev_psfImage);}
if(imagepsf > 0) {cudaFree(dev_blur_smatrix); cudaFree(dev_blur_snmatrix);}
if(rgl == 1 && blur == 3) {free(blurpara); cudaFree(dev_allweight); cudaFree(dev_blurpara);}
if(rgl == 1) {free(poimage); cudaFree(dev_poimage); cudaFree(dev_bmatrix);}
return 0;
}
|
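The reconstruction pair above repeatedly uses the same reduction idiom: a kernel (calave, calLogLike, calLogR) writes one partial result per block into a blocksPerGrid-sized buffer, and the host copies that buffer back and sums it. Below is a stand-alone sketch of the idiom; the names (blockSum, partials) are illustrative and not from the dataset.

```cpp
#include <cstdio>
#include <vector>
#include <hip/hip_runtime.h>

// One partial sum per block; the host adds the per-block partials afterwards.
__global__ void blockSum(const float* in, float* partials, int n)
{
    __shared__ float cache[256];
    int tid = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + tid;
    cache[tid] = (i < n) ? in[i] : 0.0f;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {   // tree reduction in shared memory
        if (tid < s) cache[tid] += cache[tid + s];
        __syncthreads();
    }
    if (tid == 0) partials[blockIdx.x] = cache[0];
}

int main()
{
    const int n = 1 << 16, threads = 256, blocks = (n + threads - 1) / threads;
    std::vector<float> host(n, 1.0f);                // expected sum: n
    float *d_in, *d_partials;
    hipMalloc((void**)&d_in, n * sizeof(float));
    hipMalloc((void**)&d_partials, blocks * sizeof(float));
    hipMemcpy(d_in, host.data(), n * sizeof(float), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(blockSum, dim3(blocks), dim3(threads), 0, 0, d_in, d_partials, n);

    std::vector<float> partials(blocks);
    hipMemcpy(partials.data(), d_partials, blocks * sizeof(float), hipMemcpyDeviceToHost);
    double total = 0.0;
    for (float p : partials) total += p;             // host-side final sum, as in the row above
    std::printf("sum = %f (expected %d)\n", total, n);

    hipFree(d_in); hipFree(d_partials);
    return 0;
}
```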
b41503bbbe667e6d173ba5167874853e11b48b0c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2 )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
newa[j*m+i] = w0*a[j*m+i] +
w1 * (a[j*m+i-1] + a[(j-1)*m+i] +
a[j*m+i+1] + a[(j+1)*m+i]) +
w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] +
a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
__shared__ float mychange[256];
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( newa[j*m+i] - a[j*m+i] );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
}
__global__ void
reductionkernel( float* lchange, int n )
{
__shared__ float mychange[256];
float mych = 0.0f;
int ii = threadIdx.x, m;
if( ii < n ) mych = lchange[ii];
m = blockDim.x;
while( m <= n ){
mych = fmaxf( mych, lchange[ii+m] );
m += blockDim.x;
}
mychange[ii] = mych;
__syncthreads();
int nn = blockDim.x;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]);
__syncthreads();
}
if( ii == 0 )
lchange[0] = mychange[0];
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
hipEvent_t e1, e2;
bx = 16;
by = 16;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
hipMalloc( &da, memsize );
hipMalloc( &dnewa, memsize );
hipMalloc( &lchange, gx * gy * sizeof(float) );
hipEventCreate( &e1 );
hipEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
hipMemcpy( da, a, memsize, hipMemcpyHostToDevice );
hipMemcpy( dnewa, a, memsize, hipMemcpyHostToDevice );
do{
float msec;
++iters;
hipEventRecord( e1 );
hipLaunchKernelGGL(( jacobikernel), dim3(grid), dim3(block) , 0, 0, da, dnewa, lchange, n, m, w0, w1, w2 );
hipLaunchKernelGGL(( reductionkernel), dim3(1), dim3(bx*by) , 0, 0, lchange, gx*gy );
hipEventRecord( e2 );
hipMemcpy( &change, lchange, sizeof(float), hipMemcpyDeviceToHost );
hipEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
}while( change > tol );
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %f seconds total\n", sumtime/1000.0f );
hipMemcpy( a, dnewa, memsize, hipMemcpyDeviceToHost );
hipFree( da );
hipFree( dnewa );
hipFree( lchange );
hipEventDestroy( e1 );
hipEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int
main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
|
b41503bbbe667e6d173ba5167874853e11b48b0c.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
__global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2 )
{
int ti = threadIdx.x;
int tj = threadIdx.y;
int i = blockIdx.x * blockDim.x + ti + 1;
int j = blockIdx.y * blockDim.y + tj + 1;
newa[j*m+i] = w0*a[j*m+i] +
w1 * (a[j*m+i-1] + a[(j-1)*m+i] +
a[j*m+i+1] + a[(j+1)*m+i]) +
w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] +
a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
__shared__ float mychange[256];
int ii = ti+blockDim.x*tj;
mychange[ii] = fabsf( newa[j*m+i] - a[j*m+i] );
__syncthreads();
int nn = blockDim.x * blockDim.y;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
__syncthreads();
}
if( ii == 0 )
lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
}
__global__ void
reductionkernel( float* lchange, int n )
{
__shared__ float mychange[256];
float mych = 0.0f;
int ii = threadIdx.x, m;
if( ii < n ) mych = lchange[ii];
m = blockDim.x;
while( m <= n ){
mych = fmaxf( mych, lchange[ii+m] );
m += blockDim.x;
}
mychange[ii] = mych;
__syncthreads();
int nn = blockDim.x;
while( (nn>>=1) > 0 ){
if( ii < nn )
mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]);
__syncthreads();
}
if( ii == 0 )
lchange[0] = mychange[0];
}
static float sumtime;
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
float change;
int iters;
size_t memsize;
int bx, by, gx, gy;
float *da, *dnewa, *lchange;
cudaEvent_t e1, e2;
bx = 16;
by = 16;
gx = (n-2)/bx + ((n-2)%bx == 0?0:1);
gy = (m-2)/by + ((m-2)%by == 0?0:1);
sumtime = 0.0f;
memsize = sizeof(float) * n * m;
cudaMalloc( &da, memsize );
cudaMalloc( &dnewa, memsize );
cudaMalloc( &lchange, gx * gy * sizeof(float) );
cudaEventCreate( &e1 );
cudaEventCreate( &e2 );
dim3 block( bx, by );
dim3 grid( gx, gy );
iters = 0;
cudaMemcpy( da, a, memsize, cudaMemcpyHostToDevice );
cudaMemcpy( dnewa, a, memsize, cudaMemcpyHostToDevice );
do{
float msec;
++iters;
cudaEventRecord( e1 );
jacobikernel<<< grid, block >>>( da, dnewa, lchange, n, m, w0, w1, w2 );
reductionkernel<<< 1, bx*by >>>( lchange, gx*gy );
cudaEventRecord( e2 );
cudaMemcpy( &change, lchange, sizeof(float), cudaMemcpyDeviceToHost );
cudaEventElapsedTime( &msec, e1, e2 );
sumtime += msec;
float *ta;
ta = da;
da = dnewa;
dnewa = ta;
}while( change > tol );
printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
printf( "JacobiGPU used %f seconds total\n", sumtime/1000.0f );
cudaMemcpy( a, dnewa, memsize, cudaMemcpyDeviceToHost );
cudaFree( da );
cudaFree( dnewa );
cudaFree( lchange );
cudaEventDestroy( e1 );
cudaEventDestroy( e2 );
}
static void init( float* a, int n, int m )
{
int i, j;
memset( a, 0, sizeof(float) * n * m );
/* boundary conditions */
for( j = 0; j < n; ++j ){
a[j*m+n-1] = j;
}
for( i = 0; i < m; ++i ){
a[(n-1)*m+i] = i;
}
a[(n-1)*m+m-1] = m+n;
}
int
main( int argc, char* argv[] )
{
int n, m;
float *a;
struct timeval tt1, tt2;
int ms;
float fms;
if( argc <= 1 ){
fprintf( stderr, "%s sizen [sizem]\n", argv[0] );
return 1;
}
n = atoi( argv[1] );
if( n <= 0 ) n = 100;
m = n;
if( argc > 2 ){
m = atoi( argv[2] );
if( m <= 0 ) m = 100;
}
printf( "Jacobi %d x %d\n", n, m );
a = (float*)malloc( sizeof(float) * n * m );
init( a, n, m );
gettimeofday( &tt1, NULL );
JacobiGPU( a, n, m, .2, .1, .1, .1 );
gettimeofday( &tt2, NULL );
ms = (tt2.tv_sec - tt1.tv_sec);
ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
fms = (float)ms / 1000000.0f;
printf( "time(gpu ) = %f seconds\n", fms );
}
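/* Note on the call JacobiGPU(a, n, m, .2, .1, .1, .1) above: jacobikernel weights the
 * centre point by w0, the four edge neighbours by w1 and the four diagonal neighbours
 * by w2, so the stencil coefficients sum to 0.2 + 4*0.1 + 4*0.1 = 1.0, i.e. each sweep
 * is a weighted average of the 3x3 neighbourhood. The final argument (0.1) is the
 * convergence tolerance tol checked against the reduced maximum change. */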
|
c5f65114fbf0a55e46dcbbbd11529e43e85b1ce9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
#include <color_spinor_field_order.h>
//#define QUAD_SUM
#ifdef QUAD_SUM
#include <dbldbl.h>
#endif
#include <cub_helper.cuh>
template<typename> struct ScalarType { };
template<> struct ScalarType<double> { typedef double type; };
template<> struct ScalarType<double2> { typedef double type; };
template<> struct ScalarType<double3> { typedef double type; };
template<typename> struct Vec2Type { };
template<> struct Vec2Type<double> { typedef double2 type; };
#ifdef QUAD_SUM
#define QudaSumFloat doubledouble
#define QudaSumFloat2 doubledouble2
#define QudaSumFloat3 doubledouble3
template<> struct ScalarType<doubledouble> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble2> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble3> { typedef doubledouble type; };
template<> struct Vec2Type<doubledouble> { typedef doubledouble2 type; };
#else
#define QudaSumFloat double
#define QudaSumFloat2 double2
#define QudaSumFloat3 double3
#endif
#define REDUCE_MAX_BLOCKS 65536
void checkSpinor(const ColorSpinorField &a, const ColorSpinorField &b) {
if (a.Precision() != b.Precision())
errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision());
if (a.Length() != b.Length())
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length());
if (a.Stride() != b.Stride())
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride());
}
void checkLength(const ColorSpinorField &a, ColorSpinorField &b) {
if (a.Length() != b.Length())
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length());
if (a.Stride() != b.Stride())
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride());
}
static struct {
const char *vol_str;
const char *aux_str;
char aux_tmp[quda::TuneKey::aux_n];
} blasStrings;
// These are used for reduction kernels
static QudaSumFloat *d_reduce=0;
static QudaSumFloat *h_reduce=0;
static QudaSumFloat *hd_reduce=0;
static hipEvent_t reduceEnd;
namespace quda {
namespace blas {
hipStream_t* getStream();
void* getDeviceReduceBuffer() { return d_reduce; }
void* getMappedHostReduceBuffer() { return hd_reduce; }
void* getHostReduceBuffer() { return h_reduce; }
hipEvent_t* getReduceEvent() { return &reduceEnd; }
void initReduce()
{
const int MaxReduce = 16;
// reduction buffer size
size_t bytes = 2*MaxReduce*3*REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat); // Factor of N for composite reductions
if (!d_reduce) d_reduce = (QudaSumFloat *) device_malloc(bytes);
// these arrays are actually oversized currently (only needs to be QudaSumFloat3)
// if the device supports host-mapped memory then use a host-mapped array for the reduction
if (!h_reduce) {
// only use zero copy reductions when using 64-bit
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
if(deviceProp.canMapHostMemory) {
h_reduce = (QudaSumFloat *) mapped_malloc(bytes);
hipHostGetDevicePointer(&hd_reduce, h_reduce, 0); // set the matching device pointer
} else
#endif
{
h_reduce = (QudaSumFloat *) pinned_malloc(bytes);
hd_reduce = d_reduce;
}
      memset(h_reduce, 0, bytes); // added to ensure that valgrind doesn't report h_reduce is uninitialised
}
hipEventCreateWithFlags(&reduceEnd, hipEventDisableTiming);
checkCudaError();
}
void endReduce(void)
{
if (d_reduce) {
device_free(d_reduce);
d_reduce = 0;
}
if (h_reduce) {
host_free(h_reduce);
h_reduce = 0;
}
hd_reduce = 0;
hipEventDestroy(reduceEnd);
}
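    /* Lifecycle sketch (an assumption about call order inferred from the two routines
       above; the actual call sites live outside this file):
         quda::blas::initReduce();   // allocate d_reduce / h_reduce / hd_reduce once
         // ... perform reductions such as norm2(), cDotProduct(), axpyNorm() ...
         quda::blas::endReduce();    // release the reduction buffers at shutdown
    */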
namespace reduce {
#include <texture.h>
#include <reduce_core.cuh>
#include <reduce_core.h>
#include <reduce_mixed_core.h>
} // namespace reduce
/**
Base class from which all reduction functors should derive.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct ReduceFunctor {
//! pre-computation routine called before the "M-loop"
virtual __device__ __host__ void pre() { ; }
//! where the reduction is usually computed and any auxiliary operations
      virtual __device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y,
FloatN &z, FloatN &w, FloatN &v) = 0;
//! post-computation routine called after the "M-loop"
virtual __device__ __host__ void post(ReduceType &sum) { ; }
};
/**
Return the L1 norm of x
*/
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const double2 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y);
}
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const float2 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y);
}
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const float4 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y) + (ReduceType)fabs(a.z) + (ReduceType)fabs(a.w);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Norm1 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Norm1(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z,FloatN &w, FloatN &v)
{ sum += norm1_<ReduceType>(x); }
static int streams() { return 1; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double norm1(const ColorSpinorField &x) {
#ifdef HOST_DEBUG
ColorSpinorField &y = const_cast<ColorSpinorField&>(x); // FIXME
return reduce::reduceCuda<double,QudaSumFloat,Norm1,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), y, y, y, y, y);
#else
errorQuda("L1 norm kernel only built when HOST_DEBUG is enabled");
return 0.0;
#endif
}
/**
Return the L2 norm of x
*/
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const double2 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
}
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const float2 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
}
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const float4 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
sum += (ReduceType)a.z*(ReduceType)a.z;
sum += (ReduceType)a.w*(ReduceType)a.w;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Norm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Norm2(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z,FloatN &w, FloatN &v)
{ norm2_<ReduceType>(sum,x); }
static int streams() { return 1; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double norm2(const ColorSpinorField &x) {
ColorSpinorField &y = const_cast<ColorSpinorField&>(x);
return reduce::reduceCuda<double,QudaSumFloat,Norm2,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), y, y, y, y, y);
}
/**
Return the real dot product of x and y
*/
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const double2 &a, const double2 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
}
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float2 &a, const float2 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
}
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float4 &a, const float4 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
sum += (ReduceType)a.z*(ReduceType)b.z;
sum += (ReduceType)a.w*(ReduceType)b.w;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Dot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Dot(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ dot_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double reDotProduct(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,Dot,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
* Returns the real component of the dot product of a and b and
* the norm of a
*/
template<typename ReduceType, typename InputType>
__device__ __host__ ReduceType dotNormA_(const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
ReduceType c;
dot_<scalar>(c.x,a,b);
norm2_<scalar>(c.y,a);
return c;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct DotNormA : public ReduceFunctor<ReduceType, Float2, FloatN> {
DotNormA(const Float2 &a, const Float2 &b){}
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{sum += dotNormA_<ReduceType,FloatN>(x,y);}
static int streams() { return 2; }
static int flops() { return 4; }
};
double2 reDotProductNormA(ColorSpinorField &x,ColorSpinorField &y){
return reduce::reduceCuda<double2,QudaSumFloat2,DotNormA,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] += a*x[i]
Return the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct axpyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
axpyNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y += a.x*x; norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
double axpyNorm(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,axpyNorm2,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] += a*x[i]
Return real dot product (x,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct AxpyReDot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
AxpyReDot(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y += a.x*x; dot_<ReduceType>(sum,x,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
double axpyReDot(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,AxpyReDot,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] = x[i] - y[i]
Second returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xmyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
xmyNorm2(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y = x - y; norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 3; } //! flops per element
};
double xmyNorm(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,xmyNorm2,0,1,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
Functor to perform the operation y += a * x (complex-valued)
*/
__device__ __host__ void Caxpy_(const double2 &a, const double2 &x, double2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
__device__ __host__ void Caxpy_(const float2 &a, const float2 &x, float2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
__device__ __host__ void Caxpy_(const float2 &a, const float4 &x, float4 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
y.z += a.x*x.z; y.z -= a.y*x.w;
y.w += a.y*x.z; y.w += a.x*x.w;
}
/**
First performs the operation y[i] = a*x[i] + y[i] (complex-valued)
Second returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpyNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
Caxpy_(a, x, y); norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double caxpyNorm(const Complex &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,caxpyNorm2,0,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
double caxpyXmayNormCuda(float a, float *x, float *y, n){}
First performs the operation y[i] = a*x[i] + y[i]
      Second performs the operation x[i] -= a*z[i]
Third returns the norm of x
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpyxmaznormx : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpyxmaznormx(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ Caxpy_(a, x, y); Caxpy_(-a,z,x); norm2_<ReduceType>(sum,x); }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
double caxpyXmazNormX(const Complex &a, ColorSpinorField &x,
ColorSpinorField &y, ColorSpinorField &z) {
return reduce::reduceCuda<double,QudaSumFloat,caxpyxmaznormx,1,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, z, x, x);
}
/**
double cabxpyAxNorm(float a, complex b, float *x, float *y, n){}
First performs the operation y[i] += a*b*x[i]
Second performs x[i] *= a
      Third returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct cabxpyaxnorm : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
Float2 b;
cabxpyaxnorm(const Float2 &a, const Float2 &b) : a(a), b(b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ x *= a.x; Caxpy_(b, x, y); norm2_<ReduceType>(sum,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
double cabxpyAxNorm(const double &a, const Complex &b,
ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,cabxpyaxnorm,1,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(REAL(b), IMAG(b)), x, y, x, x, x);
}
/**
Returns complex-valued dot product of x and y
*/
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const double2 &a, const double2 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
}
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const float2 &a, const float2 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
}
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const float4 &a, const float4 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.x += (scalar)a.z*(scalar)b.z;
sum.x += (scalar)a.w*(scalar)b.w;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
sum.y += (scalar)a.z*(scalar)b.w;
sum.y -= (scalar)a.w*(scalar)b.z;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Cdot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Cdot(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdot_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
Complex cDotProduct(ColorSpinorField &x, ColorSpinorField &y) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
return Complex(cdot.x, cdot.y);
}
/**
double2 xpaycDotzyCuda(float2 *x, float a, float2 *y, float2 *z, int n) {}
First performs the operation y = x + a*y
Second returns cdot product (z,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xpaycdotzy : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
xpaycdotzy(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ y = x + a.x*y; cdot_<ReduceType>(sum,z,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
Complex xpaycDotzy(ColorSpinorField &x, const double &a, ColorSpinorField &y, ColorSpinorField &z) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,xpaycdotzy,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, z, x, x);
return Complex(cdot.x, cdot.y);
}
/**
double caxpyDotzyCuda(float a, float *x, float *y, float *z, n){}
First performs the operation y[i] = a*x[i] + y[i]
Second returns the dot product (z,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpydotzy : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpydotzy(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ Caxpy_(a, x, y); cdot_<ReduceType>(sum,z,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
Complex caxpyDotzy(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,caxpydotzy,0,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, z, x, x);
return Complex(cdot.x, cdot.y);
}
/**
First returns the dot product (x,y)
Returns the norm of x
*/
template<typename ReduceType, typename InputType>
__device__ __host__ void cdotNormA_(ReduceType &sum, const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
typedef typename Vec2Type<scalar>::type vec2;
cdot_<ReduceType>(sum,a,b);
norm2_<scalar>(sum.z,a);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct CdotNormA : public ReduceFunctor<ReduceType, Float2, FloatN> {
CdotNormA(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdotNormA_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 cDotProductNormA(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double3,QudaSumFloat3,CdotNormA,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First returns the dot product (x,y)
Returns the norm of y
*/
template<typename ReduceType, typename InputType>
__device__ __host__ void cdotNormB_(ReduceType &sum, const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
typedef typename Vec2Type<scalar>::type vec2;
cdot_<ReduceType>(sum,a,b);
norm2_<scalar>(sum.z,b);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct CdotNormB : public ReduceFunctor<ReduceType, Float2, FloatN> {
CdotNormB(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdotNormB_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 cDotProductNormB(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double3,QudaSumFloat3,CdotNormB,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
This convoluted kernel does the following:
z += a*x + b*y, y -= b*w, norm = (y,y), dot = (u, y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpbypzYmbwcDotProductUYNormY_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
Float2 b;
caxpbypzYmbwcDotProductUYNormY_(const Float2 &a, const Float2 &b) : a(a), b(b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) { Caxpy_(a, x, z); Caxpy_(b, y, z); Caxpy_(-b, w, y); cdotNormB_<ReduceType>(sum,v,y); }
static int streams() { return 7; } //! total number of input and output streams
static int flops() { return 18; } //! flops per element
};
double3 caxpbypzYmbwcDotProductUYNormY(const Complex &a, ColorSpinorField &x,
const Complex &b, ColorSpinorField &y,
ColorSpinorField &z, ColorSpinorField &w,
ColorSpinorField &u) {
if (x.Precision() != z.Precision()) {
return reduce::mixed::reduceCuda<double3,QudaSumFloat3,caxpbypzYmbwcDotProductUYNormY_,0,1,1,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(REAL(b), IMAG(b)), x, y, z, w, u);
} else {
return reduce::reduceCuda<double3,QudaSumFloat3,caxpbypzYmbwcDotProductUYNormY_,0,1,1,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(REAL(b), IMAG(b)), x, y, z, w, u);
}
}
/**
Specialized kernel for the modified CG norm computation for
computing beta. Computes y = y + a*x and returns norm(y) and
dot(y, delta(y)) where delta(y) is the difference between the
input and out y vector.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct axpyCGNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
axpyCGNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
typedef typename ScalarType<ReduceType>::type scalar;
FloatN z_new = z + a.x*x;
norm2_<scalar>(sum.x,z_new);
dot_<scalar>(sum.y,z_new,z_new-z);
z = z_new;
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per real element
};
Complex axpyCGNorm(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
// swizzle since mixed is on z
double2 cg_norm ;
if (x.Precision() != y.Precision()) {
cg_norm = reduce::mixed::reduceCuda<double2,QudaSumFloat2,axpyCGNorm2,0,0,1,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, x, y, x, x);
} else {
cg_norm = reduce::reduceCuda<double2,QudaSumFloat2,axpyCGNorm2,0,0,1,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, x, y, x, x);
}
return Complex(cg_norm.x, cg_norm.y);
}
/**
This kernel returns (x, x) and (r,r) and also returns the so-called
heavy quark norm as used by MILC: 1 / N * \sum_i (r, r)_i / (x, x)_i, where
i is site index and N is the number of sites.
When this kernel is launched, we must enforce that the parameter M
in the launcher corresponds to the number of FloatN fields used to
represent the spinor, e.g., M=6 for Wilson and M=3 for staggered.
This is only the case for half-precision kernels by default. To
enable this, the siteUnroll template parameter must be set true
when reduceCuda is instantiated.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct HeavyQuarkResidualNorm_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
typedef typename scalar<ReduceType>::type real;
Float2 a;
Float2 b;
ReduceType aux;
HeavyQuarkResidualNorm_(const Float2 &a, const Float2 &b) : a(a), b(b), aux{ } { ; }
__device__ __host__ void pre() { aux.x = 0; aux.y = 0; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
norm2_<real>(aux.x,x); norm2_<real>(aux.y,y);
}
//! sum the solution and residual norms, and compute the heavy-quark norm
__device__ __host__ void post(ReduceType &sum)
{
sum.x += aux.x; sum.y += aux.y; sum.z += (aux.x > 0.0) ? (aux.y / aux.x) : static_cast<real>(1.0);
}
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 4; } //! undercounts since it excludes the per-site division
};
double3 HeavyQuarkResidualNorm(ColorSpinorField &x, ColorSpinorField &r) {
double3 rtn = reduce::reduceCuda<double3,QudaSumFloat3,HeavyQuarkResidualNorm_,0,0,0,0,0,true>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, r, r, r, r);
rtn.z /= (x.Volume()*comm_size());
return rtn;
}
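    /* Reading of the return value (derived from the code above, not an upstream note):
       .x = ||x||^2 and .y = ||r||^2 summed over the lattice, while .z accumulates the
       per-site ratio (r,r)_i / (x,x)_i and is then divided by the global volume
       x.Volume() * comm_size(), giving the MILC-style heavy-quark residual. */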
/**
Variant of the HeavyQuarkResidualNorm kernel: this takes three
arguments, the first two are summed together to form the
solution, with the third being the residual vector. This removes
      the need for an additional xpy call in the solvers, improving
performance.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xpyHeavyQuarkResidualNorm_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
typedef typename scalar<ReduceType>::type real;
Float2 a;
Float2 b;
ReduceType aux;
xpyHeavyQuarkResidualNorm_(const Float2 &a, const Float2 &b) : a(a), b(b), aux{ } { ; }
__device__ __host__ void pre() { aux.x = 0; aux.y = 0; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
norm2_<real>(aux.x,x + y); norm2_<real>(aux.y,z);
}
//! sum the solution and residual norms, and compute the heavy-quark norm
__device__ __host__ void post(ReduceType &sum)
{
sum.x += aux.x; sum.y += aux.y; sum.z += (aux.x > 0.0) ? (aux.y / aux.x) : static_cast<real>(1.0);
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 5; }
};
double3 xpyHeavyQuarkResidualNorm(ColorSpinorField &x, ColorSpinorField &y,
ColorSpinorField &r) {
double3 rtn = reduce::reduceCuda<double3,QudaSumFloat3,xpyHeavyQuarkResidualNorm_,0,0,0,0,0,true>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, r, r, r);
rtn.z /= (x.Volume()*comm_size());
return rtn;
}
/**
double3 tripleCGUpdate(V x, V y, V z){}
First performs the operation norm2(x)
      Second performs the operation norm2(y)
      Third performs the operation dotProduct(y,z)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct tripleCGReduction_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
tripleCGReduction_(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
typedef typename ScalarType<ReduceType>::type scalar;
norm2_<scalar>(sum.x,x); norm2_<scalar>(sum.y,y); dot_<scalar>(sum.z,y,z);
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 tripleCGReduction(ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) {
return reduce::reduceCuda<double3,QudaSumFloat3,tripleCGReduction_,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x, x);
}
} // namespace blas
} // namespace quda
|
c5f65114fbf0a55e46dcbbbd11529e43e85b1ce9.cu
|
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
#include <color_spinor_field_order.h>
//#define QUAD_SUM
#ifdef QUAD_SUM
#include <dbldbl.h>
#endif
#include <cub_helper.cuh>
template<typename> struct ScalarType { };
template<> struct ScalarType<double> { typedef double type; };
template<> struct ScalarType<double2> { typedef double type; };
template<> struct ScalarType<double3> { typedef double type; };
template<typename> struct Vec2Type { };
template<> struct Vec2Type<double> { typedef double2 type; };
#ifdef QUAD_SUM
#define QudaSumFloat doubledouble
#define QudaSumFloat2 doubledouble2
#define QudaSumFloat3 doubledouble3
template<> struct ScalarType<doubledouble> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble2> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble3> { typedef doubledouble type; };
template<> struct Vec2Type<doubledouble> { typedef doubledouble2 type; };
#else
#define QudaSumFloat double
#define QudaSumFloat2 double2
#define QudaSumFloat3 double3
#endif
#define REDUCE_MAX_BLOCKS 65536
void checkSpinor(const ColorSpinorField &a, const ColorSpinorField &b) {
if (a.Precision() != b.Precision())
errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision());
if (a.Length() != b.Length())
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length());
if (a.Stride() != b.Stride())
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride());
}
void checkLength(const ColorSpinorField &a, ColorSpinorField &b) {
if (a.Length() != b.Length())
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length());
if (a.Stride() != b.Stride())
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride());
}
static struct {
const char *vol_str;
const char *aux_str;
char aux_tmp[quda::TuneKey::aux_n];
} blasStrings;
// These are used for reduction kernels
static QudaSumFloat *d_reduce=0;
static QudaSumFloat *h_reduce=0;
static QudaSumFloat *hd_reduce=0;
static cudaEvent_t reduceEnd;
namespace quda {
namespace blas {
cudaStream_t* getStream();
void* getDeviceReduceBuffer() { return d_reduce; }
void* getMappedHostReduceBuffer() { return hd_reduce; }
void* getHostReduceBuffer() { return h_reduce; }
cudaEvent_t* getReduceEvent() { return &reduceEnd; }
void initReduce()
{
const int MaxReduce = 16;
// reduction buffer size
size_t bytes = 2*MaxReduce*3*REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat); // Factor of N for composite reductions
if (!d_reduce) d_reduce = (QudaSumFloat *) device_malloc(bytes);
// these arrays are actually oversized currently (only needs to be QudaSumFloat3)
// if the device supports host-mapped memory then use a host-mapped array for the reduction
if (!h_reduce) {
// only use zero copy reductions when using 64-bit
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
if(deviceProp.canMapHostMemory) {
h_reduce = (QudaSumFloat *) mapped_malloc(bytes);
cudaHostGetDevicePointer(&hd_reduce, h_reduce, 0); // set the matching device pointer
} else
#endif
{
h_reduce = (QudaSumFloat *) pinned_malloc(bytes);
hd_reduce = d_reduce;
}
      memset(h_reduce, 0, bytes); // added to ensure that valgrind doesn't report h_reduce is uninitialised
}
cudaEventCreateWithFlags(&reduceEnd, cudaEventDisableTiming);
checkCudaError();
}
void endReduce(void)
{
if (d_reduce) {
device_free(d_reduce);
d_reduce = 0;
}
if (h_reduce) {
host_free(h_reduce);
h_reduce = 0;
}
hd_reduce = 0;
cudaEventDestroy(reduceEnd);
}
namespace reduce {
#include <texture.h>
#include <reduce_core.cuh>
#include <reduce_core.h>
#include <reduce_mixed_core.h>
} // namespace reduce
/**
Base class from which all reduction functors should derive.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct ReduceFunctor {
//! pre-computation routine called before the "M-loop"
virtual __device__ __host__ void pre() { ; }
//! where the reduction is usually computed and any auxiliary operations
      virtual __device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y,
FloatN &z, FloatN &w, FloatN &v) = 0;
//! post-computation routine called after the "M-loop"
virtual __device__ __host__ void post(ReduceType &sum) { ; }
};
/**
Return the L1 norm of x
*/
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const double2 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y);
}
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const float2 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y);
}
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const float4 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y) + (ReduceType)fabs(a.z) + (ReduceType)fabs(a.w);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Norm1 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Norm1(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z,FloatN &w, FloatN &v)
{ sum += norm1_<ReduceType>(x); }
static int streams() { return 1; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double norm1(const ColorSpinorField &x) {
#ifdef HOST_DEBUG
ColorSpinorField &y = const_cast<ColorSpinorField&>(x); // FIXME
return reduce::reduceCuda<double,QudaSumFloat,Norm1,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), y, y, y, y, y);
#else
errorQuda("L1 norm kernel only built when HOST_DEBUG is enabled");
return 0.0;
#endif
}
/**
Return the L2 norm of x
*/
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const double2 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
}
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const float2 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
}
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const float4 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
sum += (ReduceType)a.z*(ReduceType)a.z;
sum += (ReduceType)a.w*(ReduceType)a.w;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Norm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Norm2(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z,FloatN &w, FloatN &v)
{ norm2_<ReduceType>(sum,x); }
static int streams() { return 1; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double norm2(const ColorSpinorField &x) {
ColorSpinorField &y = const_cast<ColorSpinorField&>(x);
return reduce::reduceCuda<double,QudaSumFloat,Norm2,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), y, y, y, y, y);
}
/**
Return the real dot product of x and y
*/
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const double2 &a, const double2 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
}
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float2 &a, const float2 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
}
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float4 &a, const float4 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
sum += (ReduceType)a.z*(ReduceType)b.z;
sum += (ReduceType)a.w*(ReduceType)b.w;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Dot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Dot(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ dot_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double reDotProduct(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,Dot,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
* Returns the real component of the dot product of a and b and
* the norm of a
*/
template<typename ReduceType, typename InputType>
__device__ __host__ ReduceType dotNormA_(const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
ReduceType c;
dot_<scalar>(c.x,a,b);
norm2_<scalar>(c.y,a);
return c;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct DotNormA : public ReduceFunctor<ReduceType, Float2, FloatN> {
DotNormA(const Float2 &a, const Float2 &b){}
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{sum += dotNormA_<ReduceType,FloatN>(x,y);}
static int streams() { return 2; }
static int flops() { return 4; }
};
double2 reDotProductNormA(ColorSpinorField &x,ColorSpinorField &y){
return reduce::reduceCuda<double2,QudaSumFloat2,DotNormA,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] += a*x[i]
Return the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct axpyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
axpyNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y += a.x*x; norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
double axpyNorm(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,axpyNorm2,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] += a*x[i]
Return real dot product (x,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct AxpyReDot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
AxpyReDot(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y += a.x*x; dot_<ReduceType>(sum,x,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
double axpyReDot(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,AxpyReDot,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] = x[i] - y[i]
Second returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xmyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
xmyNorm2(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y = x - y; norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 3; } //! flops per element
};
double xmyNorm(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,xmyNorm2,0,1,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
Functor to perform the operation y += a * x (complex-valued)
*/
__device__ __host__ void Caxpy_(const double2 &a, const double2 &x, double2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
__device__ __host__ void Caxpy_(const float2 &a, const float2 &x, float2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
__device__ __host__ void Caxpy_(const float2 &a, const float4 &x, float4 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
y.z += a.x*x.z; y.z -= a.y*x.w;
y.w += a.y*x.z; y.w += a.x*x.w;
}
/**
First performs the operation y[i] = a*x[i] + y[i] (complex-valued)
Second returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpyNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
Caxpy_(a, x, y); norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double caxpyNorm(const Complex &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,caxpyNorm2,0,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
double caxpyXmayNormCuda(float a, float *x, float *y, n){}
First performs the operation y[i] = a*x[i] + y[i]
      Second performs the operation x[i] -= a*z[i]
Third returns the norm of x
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpyxmaznormx : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpyxmaznormx(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ Caxpy_(a, x, y); Caxpy_(-a,z,x); norm2_<ReduceType>(sum,x); }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
double caxpyXmazNormX(const Complex &a, ColorSpinorField &x,
ColorSpinorField &y, ColorSpinorField &z) {
return reduce::reduceCuda<double,QudaSumFloat,caxpyxmaznormx,1,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, z, x, x);
}
/**
double cabxpyAxNorm(float a, complex b, float *x, float *y, n){}
First performs the operation y[i] += a*b*x[i]
Second performs x[i] *= a
      Third returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct cabxpyaxnorm : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
Float2 b;
cabxpyaxnorm(const Float2 &a, const Float2 &b) : a(a), b(b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ x *= a.x; Caxpy_(b, x, y); norm2_<ReduceType>(sum,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
double cabxpyAxNorm(const double &a, const Complex &b,
ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,cabxpyaxnorm,1,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(REAL(b), IMAG(b)), x, y, x, x, x);
}
/**
Returns complex-valued dot product of x and y
*/
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const double2 &a, const double2 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
}
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const float2 &a, const float2 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
}
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const float4 &a, const float4 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.x += (scalar)a.z*(scalar)b.z;
sum.x += (scalar)a.w*(scalar)b.w;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
sum.y += (scalar)a.z*(scalar)b.w;
sum.y -= (scalar)a.w*(scalar)b.z;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Cdot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Cdot(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdot_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
Complex cDotProduct(ColorSpinorField &x, ColorSpinorField &y) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
return Complex(cdot.x, cdot.y);
}
/**
double2 xpaycDotzyCuda(float2 *x, float a, float2 *y, float2 *z, int n) {}
First performs the operation y = x + a*y
Second returns cdot product (z,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xpaycdotzy : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
xpaycdotzy(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ y = x + a.x*y; cdot_<ReduceType>(sum,z,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
Complex xpaycDotzy(ColorSpinorField &x, const double &a, ColorSpinorField &y, ColorSpinorField &z) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,xpaycdotzy,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, z, x, x);
return Complex(cdot.x, cdot.y);
}
/**
double caxpyDotzyCuda(float a, float *x, float *y, float *z, n){}
First performs the operation y[i] = a*x[i] + y[i]
Second returns the dot product (z,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpydotzy : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpydotzy(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ Caxpy_(a, x, y); cdot_<ReduceType>(sum,z,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
Complex caxpyDotzy(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,caxpydotzy,0,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, z, x, x);
return Complex(cdot.x, cdot.y);
}
/**
First returns the dot product (x,y)
Returns the norm of x
*/
template<typename ReduceType, typename InputType>
__device__ __host__ void cdotNormA_(ReduceType &sum, const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
typedef typename Vec2Type<scalar>::type vec2;
cdot_<ReduceType>(sum,a,b);
norm2_<scalar>(sum.z,a);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct CdotNormA : public ReduceFunctor<ReduceType, Float2, FloatN> {
CdotNormA(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdotNormA_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 cDotProductNormA(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double3,QudaSumFloat3,CdotNormA,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First returns the dot product (x,y)
Returns the norm of y
*/
template<typename ReduceType, typename InputType>
__device__ __host__ void cdotNormB_(ReduceType &sum, const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
typedef typename Vec2Type<scalar>::type vec2;
cdot_<ReduceType>(sum,a,b);
norm2_<scalar>(sum.z,b);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct CdotNormB : public ReduceFunctor<ReduceType, Float2, FloatN> {
CdotNormB(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdotNormB_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 cDotProductNormB(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double3,QudaSumFloat3,CdotNormB,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
This convoluted kernel does the following:
z += a*x + b*y, y -= b*w, norm = (y,y), dot = (u, y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpbypzYmbwcDotProductUYNormY_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
Float2 b;
caxpbypzYmbwcDotProductUYNormY_(const Float2 &a, const Float2 &b) : a(a), b(b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) { Caxpy_(a, x, z); Caxpy_(b, y, z); Caxpy_(-b, w, y); cdotNormB_<ReduceType>(sum,v,y); }
static int streams() { return 7; } //! total number of input and output streams
static int flops() { return 18; } //! flops per element
};
double3 caxpbypzYmbwcDotProductUYNormY(const Complex &a, ColorSpinorField &x,
const Complex &b, ColorSpinorField &y,
ColorSpinorField &z, ColorSpinorField &w,
ColorSpinorField &u) {
if (x.Precision() != z.Precision()) {
return reduce::mixed::reduceCuda<double3,QudaSumFloat3,caxpbypzYmbwcDotProductUYNormY_,0,1,1,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(REAL(b), IMAG(b)), x, y, z, w, u);
} else {
return reduce::reduceCuda<double3,QudaSumFloat3,caxpbypzYmbwcDotProductUYNormY_,0,1,1,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(REAL(b), IMAG(b)), x, y, z, w, u);
}
}
/**
Specialized kernel for the modified CG norm computation for
computing beta. Computes y = y + a*x and returns norm(y) and
dot(y, delta(y)) where delta(y) is the difference between the
input and out y vector.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct axpyCGNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
axpyCGNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
typedef typename ScalarType<ReduceType>::type scalar;
FloatN z_new = z + a.x*x;
norm2_<scalar>(sum.x,z_new);
dot_<scalar>(sum.y,z_new,z_new-z);
z = z_new;
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per real element
};
Complex axpyCGNorm(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
// swizzle since mixed is on z
double2 cg_norm ;
if (x.Precision() != y.Precision()) {
cg_norm = reduce::mixed::reduceCuda<double2,QudaSumFloat2,axpyCGNorm2,0,0,1,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, x, y, x, x);
} else {
cg_norm = reduce::reduceCuda<double2,QudaSumFloat2,axpyCGNorm2,0,0,1,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, x, y, x, x);
}
return Complex(cg_norm.x, cg_norm.y);
}
/**
This kernel returns (x, x) and (r,r) and also returns the so-called
heavy quark norm as used by MILC: 1 / N * \sum_i (r, r)_i / (x, x)_i, where
i is site index and N is the number of sites.
When this kernel is launched, we must enforce that the parameter M
in the launcher corresponds to the number of FloatN fields used to
represent the spinor, e.g., M=6 for Wilson and M=3 for staggered.
This is only the case for half-precision kernels by default. To
enable this, the siteUnroll template parameter must be set true
when reduceCuda is instantiated.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct HeavyQuarkResidualNorm_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
typedef typename scalar<ReduceType>::type real;
Float2 a;
Float2 b;
ReduceType aux;
HeavyQuarkResidualNorm_(const Float2 &a, const Float2 &b) : a(a), b(b), aux{ } { ; }
__device__ __host__ void pre() { aux.x = 0; aux.y = 0; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
norm2_<real>(aux.x,x); norm2_<real>(aux.y,y);
}
//! sum the solution and residual norms, and compute the heavy-quark norm
__device__ __host__ void post(ReduceType &sum)
{
sum.x += aux.x; sum.y += aux.y; sum.z += (aux.x > 0.0) ? (aux.y / aux.x) : static_cast<real>(1.0);
}
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 4; } //! undercounts since it excludes the per-site division
};
double3 HeavyQuarkResidualNorm(ColorSpinorField &x, ColorSpinorField &r) {
double3 rtn = reduce::reduceCuda<double3,QudaSumFloat3,HeavyQuarkResidualNorm_,0,0,0,0,0,true>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, r, r, r, r);
rtn.z /= (x.Volume()*comm_size());
return rtn;
}
/**
Variant of the HeavyQuarkResidualNorm kernel: this takes three
arguments, the first two are summed together to form the
solution, with the third being the residual vector. This removes
      the need for an additional xpy call in the solvers, improving
performance.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xpyHeavyQuarkResidualNorm_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
typedef typename scalar<ReduceType>::type real;
Float2 a;
Float2 b;
ReduceType aux;
xpyHeavyQuarkResidualNorm_(const Float2 &a, const Float2 &b) : a(a), b(b), aux{ } { ; }
__device__ __host__ void pre() { aux.x = 0; aux.y = 0; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
norm2_<real>(aux.x,x + y); norm2_<real>(aux.y,z);
}
//! sum the solution and residual norms, and compute the heavy-quark norm
__device__ __host__ void post(ReduceType &sum)
{
sum.x += aux.x; sum.y += aux.y; sum.z += (aux.x > 0.0) ? (aux.y / aux.x) : static_cast<real>(1.0);
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 5; }
};
double3 xpyHeavyQuarkResidualNorm(ColorSpinorField &x, ColorSpinorField &y,
ColorSpinorField &r) {
double3 rtn = reduce::reduceCuda<double3,QudaSumFloat3,xpyHeavyQuarkResidualNorm_,0,0,0,0,0,true>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, r, r, r);
rtn.z /= (x.Volume()*comm_size());
return rtn;
}
/**
double3 tripleCGUpdate(V x, V y, V z){}
First performs the operation norm2(x)
      Second performs the operation norm2(y)
      Third performs the operation dotProduct(y,z)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct tripleCGReduction_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
tripleCGReduction_(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
typedef typename ScalarType<ReduceType>::type scalar;
norm2_<scalar>(sum.x,x); norm2_<scalar>(sum.y,y); dot_<scalar>(sum.z,y,z);
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 tripleCGReduction(ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) {
return reduce::reduceCuda<double3,QudaSumFloat3,tripleCGReduction_,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x, x);
}
} // namespace blas
} // namespace quda
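/* Usage sketch (assumption -- caller code is not part of this file; x and y are valid
   ColorSpinorField objects, a is a double coefficient, and quda::blas::initReduce()
   has already been called):
     double n2        = quda::blas::norm2(x);            // (x,x)
     quda::Complex ip = quda::blas::cDotProduct(x, y);   // complex (x,y)
     double yn2       = quda::blas::axpyNorm(a, x, y);   // y += a*x, then returns (y,y)
*/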
|
3e1c64d73025c51cd6f82881aa0b21afcbdba40b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <iostream>
#include <string.h>
#include <opencv2\opencv.hpp>
#include <math.h>
#include <io.h>
#include <vector>
#include <fstream>
using namespace std;
using namespace cv;
__host__ int otsuThresh(int* hist, int imgHeight, int imgWidth)
{
float sum = 0;
for (int i = 0; i < 256; i++)
{
sum += i * hist[i];
}
float w0 = 0, u0 = 0;
float u = sum / (imgHeight * imgWidth);
float val = 0, maxval = 0;
float s = 0, n = 0;
int thresh = 0;
for (int i = 0; i < 256; i++)
{
s += hist[i] * i;
n += hist[i];
w0 = n / (imgHeight * imgWidth);
u0 = s / n;
val = (u - u0) * (u - u0) * w0 / (1 - w0);
if (val > maxval)
{
maxval = val;
thresh = i;
}
}
return thresh;
}
//get gray histogram
__global__ void imhistInCuda(unsigned char* dataIn, int* hist, int imgHeight, int imgWidth)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
if (xIndex < imgWidth && yIndex < imgHeight)
{
atomicAdd(&hist[dataIn[yIndex * imgWidth + xIndex]], 1);
}
}
// Calculate the maximum Between-Class variance with CUDA
__global__ void OTSUthresh(const int* hist, float* sum, float* s, float* n, float* val, int imgHeight, int imgWidth, int* OtsuThresh)
{
if (blockIdx.x == 0)
{
int index = threadIdx.x;
atomicAdd(&sum[0], hist[index] * index);
}
else
{
int index = threadIdx.x;
if (index < blockIdx.x)
{
atomicAdd(&s[blockIdx.x - 1], hist[index] * index);
atomicAdd(&n[blockIdx.x - 1], hist[index]);
}
}
//synchronize the threads of this block (__syncthreads() does not synchronize across blocks)
__syncthreads();
if (blockIdx.x > 0)
{
int index = blockIdx.x - 1;
float u = sum[0] / (imgHeight * imgWidth);
float w0 = n[index] / (imgHeight * imgWidth);
float u0 = s[index] / n[index];
if (w0 == 1)
{
val[index] = 0;
}
else
{
val[index] = (u - u0) * (u - u0) * w0 / (1 - w0);
}
}
__syncthreads();
if (threadIdx.x == 0 && blockIdx.x == 0)
{
float maxval = 0;
for (int i = 0; i < 256; i++)
{
if (val[i] > maxval)
{
maxval = val[i];
OtsuThresh[0] = i;
OtsuThresh[1] = val[i];
}
}
}
}
//thresholding
__global__ void otsuInCuda(unsigned char* dataIn, unsigned char* dataOut, int imgHeight, int imgWidth, int* hThresh)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
if (xIndex < imgWidth && yIndex < imgHeight)
{
if (dataIn[yIndex * imgWidth + xIndex] > hThresh[0])
{
dataOut[yIndex * imgWidth + xIndex] = 255;
}
else
{
// write 0 explicitly so the output buffer is fully defined
dataOut[yIndex * imgWidth + xIndex] = 0;
}
}
}
//implementation of Otsu thresholding without the OpenCV Otsu lib
// get the global best threshold (CPU reference implementation)
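// Otsu's method picks the threshold t that maximises the between-class variance
// sigma_B^2(t) = w0 * w1 * (u0 - u1)^2, which is exactly what the loop below
// evaluates for every candidate t.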
int getOstu(const Mat& in){
int rows = in.rows;
int cols = in.cols;
long size = rows * cols;
float histogram[256] = { 0 };
for (int i = 0; i < rows; ++i){
const uchar* p = in.ptr<uchar>(i);
for (int j = 0; j < cols; ++j){
histogram[int(*p++)]++;
}
}
int threshold;
//sum0: sum of the gray values of the foreground pixels
//sum1: sum of the gray values of the background pixels
long sum0 = 0, sum1 = 0;
//cnt0, cnt1: number of foreground / background pixels
long cnt0 = 0, cnt1 = 0;
//w0,w1: The proportion of the foreground gray scale pixels (0~i) and
//background gray scale pixels(i~255) to the whole image
double w0 = 0, w1 = 0;
//u0:Average gray level of the foreground pixel
//u1: Average gray level of the background pixel
double u0 = 0, u1 = 0;
double variance = 0;
double maxVariance = 0;
for (int i = 1; i < 256; i++) {
sum0 = 0;
sum1 = 0;
cnt0 = 0;
cnt1 = 0;
w0 = 0;
w1 = 0;
for (int j = 0; j < i; j++) {
cnt0 += histogram[j];
sum0 += j * histogram[j];
}
u0 = (double)sum0 / cnt0;
w0 = (double)cnt0 / size;
for (int j = i; j <= 255; j++){
cnt1 += histogram[j];
sum1 += j * histogram[j];
}
u1 = (double)sum1 / cnt1;
w1 = 1 - w0;
//Calculate the variance of foreground and background pixels
variance = w0 * w1 * (u0 - u1) * (u0 - u1);
if (variance > maxVariance){
maxVariance = variance;
threshold = i;
}
}
return threshold;
}
// get all files In the specified directory
void getAllFiles(string path, vector<string>& files, vector<string>& files_name)
{
//file handle
long hFile = 0;
//file information
struct _finddata_t fileinfo;
string p;
if ((hFile = _findfirst(p.assign(path).append("\\*").c_str(), &fileinfo)) != -1)
{
do
{
if ((fileinfo.attrib & _A_SUBDIR)) {
if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0) {
//files.push_back(p.assign(path).append("\\").append(fileinfo.name) );
getAllFiles(p.assign(path).append("\\").append(fileinfo.name), files, files_name);
}
}else{
files.push_back(p.assign(path).append("\\").append(fileinfo.name));
files_name.push_back(fileinfo.name);
}
} while (_findnext(hFile, &fileinfo) == 0);
_findclose(hFile);
}
}
int main()
{
//input gray scale image; change the image path for other resolutions
// define double-dimensional array with the result dealt with opencv in raw iamge
//Mat srcImg = imread("D:/project/image_segment_with_cuda/image/raw/2.jpg", 0);
vector<string> temp;
vector<string> files_name;
getAllFiles("D:\\project\\image_segment_with_cuda\\image\\raw", temp,files_name);
for (int i = 0; i < temp.size(); ++i){
cout << temp[i] << endl;
Mat srcImg = imread(temp[i], 0);
int imgHeight = srcImg.rows;
int imgWidth = srcImg.cols;
//0.implemention of OTSU binarization in CPU
double time_cpu = static_cast<double>(getTickCount());
int bestThreshold = getOstu(srcImg);
cout << "The best threshold is: " << bestThreshold << endl;
Mat otsuResultImage = Mat::zeros(imgHeight, imgWidth, CV_8UC1);
//Binary operation
for (int i = 0; i < imgHeight; i++) {
for (int j = 0; j < imgWidth; j++) {
if (srcImg.at<uchar>(i, j) > bestThreshold) {
otsuResultImage.at<uchar>(i, j) = 255;
}
else {
otsuResultImage.at<uchar>(i, j) = 0;
}
}
}
time_cpu = ((double)getTickCount() - time_cpu) / getTickFrequency();
cout << "The Run Time of OSTU Algorithm with cpu is :" << time_cpu * 1000 << "ms" << endl;
//1.implemention of OTSU binarization by OpenCv
double time_opencv = static_cast<double>(getTickCount());
Mat dstImg1;
//THRESH_OTSU: let OpenCV choose the optimal threshold with the Otsu algorithm (the THRESH_OTSU flag value is 8)
threshold(srcImg, dstImg1, 0, 255, THRESH_OTSU);
time_opencv = ((double)getTickCount() - time_opencv) / getTickFrequency();
cout << "The Run Time of OSTU algorithm with OpenCv is :" << time_opencv * 1000 << "ms" << endl;
//2.implemention of OTSU binarization by CUDA in GPU
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// CV_8UC1: single-channel, unsigned 8-bit image
Mat dstImg2(imgHeight, imgWidth, CV_8UC1, Scalar(0));
//create GPU memory
unsigned char* d_in;
int* d_hist;
hipMalloc((void**)&d_in, imgHeight * imgWidth * sizeof(unsigned char));
hipMalloc((void**)&d_hist, 256 * sizeof(int));
hipMemset(d_hist, 0, 256 * sizeof(int)); // hipMalloc does not zero memory; the histogram is built with atomicAdd
// transfer the gray scale image data from CPU(srcImg.data) to GPU(d_in)
hipMemcpy(d_in, srcImg.data, imgHeight * imgWidth * sizeof(unsigned char), hipMemcpyHostToDevice);
//block dimension:32*32
dim3 threadsPerBlock1(32, 32);
//grid dimension: round up so the whole image is covered
dim3 blocksPerGrid1((imgWidth + 32 - 1) / 32, (imgHeight + 32 - 1) / 32);
imhistInCuda << <blocksPerGrid1, threadsPerBlock1 >> > (d_in, d_hist, imgHeight, imgWidth);
float* d_sum;
float* d_s;
float* d_n;
float* d_val;
int* d_t;
hipMalloc((void**)&d_sum, sizeof(float));
hipMalloc((void**)&d_s, 256 * sizeof(float));
hipMalloc((void**)&d_n, 256 * sizeof(float));
hipMalloc((void**)&d_val, 256 * sizeof(float));
// zero the accumulators before the kernel adds into them with atomicAdd
hipMemset(d_sum, 0, sizeof(float));
hipMemset(d_s, 0, 256 * sizeof(float));
hipMemset(d_n, 0, 256 * sizeof(float));
hipMemset(d_val, 0, 256 * sizeof(float));
hipMalloc((void**)&d_t, 2 * sizeof(int));
//define the maximum Between-Class variance calculation parallel specification, where 257 is 1 + 256,
//The first block is used to calculate the sum of the gray scale image,
//and the next 256 blocks are used to calculate the S and N corresponding to the 256 grayscales
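// For example, block b = 5 accumulates s[4] = sum_{j<5} j*hist[j] and
// n[4] = sum_{j<5} hist[j], i.e. the weighted sum and the pixel count of the
// class of gray levels 0..4, from which val[4] (the between-class variance for
// threshold 4) is then computed inside the kernel.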
dim3 threadsPerBlock2(256, 1);
dim3 blocksPerGrid2(257, 1);
OTSUthresh << <blocksPerGrid2, threadsPerBlock2 >> > (d_hist, d_sum, d_s, d_n, d_val, imgHeight, imgWidth, d_t);
unsigned char* d_out;
hipMalloc((void**)&d_out, imgHeight * imgWidth * sizeof(unsigned char));
otsuInCuda << <blocksPerGrid1, threadsPerBlock1 >> > (d_in, d_out, imgHeight, imgWidth, d_t);
// copy the image thresholded with the Otsu method from GPU back to CPU
hipMemcpy(dstImg2.data, d_out, imgHeight * imgWidth * sizeof(unsigned char), hipMemcpyDeviceToHost);
// calculate the time consumption of program with CUDA in GPU
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout << "The Run Time of OSTU Algorithm with CUDA is :" << time << "ms" << endl;
cout << "Accelerative ratio between CUDA and Opencv is :" << time_opencv * 1000 / time << endl;
cout << "Accelerative ratio between CUDA and CPU program is :" << time_cpu * 1000 / time << endl;
hipFree(d_in);
hipFree(d_out);
hipFree(d_hist);
hipFree(d_sum);
hipFree(d_s);
hipFree(d_n);
hipFree(d_val);
hipFree(d_t);
// output the image after image segment
//imwrite("./image/result/2_cpu.jpg", otsuResultImage);
//imwrite("./image/result/2_opencv.jpg", dstImg1);
//imwrite("./image/result/2_cuda.jpg", dstImg2);
imwrite("./image/result/"+files_name[i]+"_cpu.jpg", otsuResultImage);
imwrite("./image/result/" + files_name[i] + "_opencv.jpg", dstImg1);
imwrite("./image/result/" + files_name[i] + "_cuda.jpg", dstImg2);
}
return 0;
}
|
3e1c64d73025c51cd6f82881aa0b21afcbdba40b.cu
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
#include <device_functions.h>
#include <iostream>
#include <string.h>
#include <opencv2\opencv.hpp>
#include <math.h>
#include <io.h>
#include <vector>
#include <fstream>
using namespace std;
using namespace cv;
__host__ int otsuThresh(int* hist, int imgHeight, int imgWidth)
{
float sum = 0;
for (int i = 0; i < 256; i++)
{
sum += i * hist[i];
}
float w0 = 0, u0 = 0;
float u = sum / (imgHeight * imgWidth);
float val = 0, maxval = 0;
float s = 0, n = 0;
int thresh = 0;
for (int i = 0; i < 256; i++)
{
s += hist[i] * i;
n += hist[i];
w0 = n / (imgHeight * imgWidth);
u0 = s / n;
val = (u - u0) * (u - u0) * w0 / (1 - w0);
if (val > maxval)
{
maxval = val;
thresh = i;
}
}
return thresh;
}
//get gray histogram
__global__ void imhistInCuda(unsigned char* dataIn, int* hist, int imgHeight, int imgWidth)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
if (xIndex < imgWidth && yIndex < imgHeight)
{
atomicAdd(&hist[dataIn[yIndex * imgWidth + xIndex]], 1);
}
}
// Calculate the maximum Between-Class variance with CUDA
__global__ void OTSUthresh(const int* hist, float* sum, float* s, float* n, float* val, int imgHeight, int imgWidth, int* OtsuThresh)
{
if (blockIdx.x == 0)
{
int index = threadIdx.x;
atomicAdd(&sum[0], hist[index] * index);
}
else
{
int index = threadIdx.x;
if (index < blockIdx.x)
{
atomicAdd(&s[blockIdx.x - 1], hist[index] * index);
atomicAdd(&n[blockIdx.x - 1], hist[index]);
}
}
//synchronize the threads of this block (__syncthreads() does not synchronize across blocks)
__syncthreads();
if (blockIdx.x > 0)
{
int index = blockIdx.x - 1;
float u = sum[0] / (imgHeight * imgWidth);
float w0 = n[index] / (imgHeight * imgWidth);
float u0 = s[index] / n[index];
if (w0 == 1)
{
val[index] = 0;
}
else
{
val[index] = (u - u0) * (u - u0) * w0 / (1 - w0);
}
}
__syncthreads();
if (threadIdx.x == 0 && blockIdx.x == 0)
{
float maxval = 0;
for (int i = 0; i < 256; i++)
{
if (val[i] > maxval)
{
maxval = val[i];
OtsuThresh[0] = i;
OtsuThresh[1] = val[i];
}
}
}
}
//thresholding
__global__ void otsuInCuda(unsigned char* dataIn, unsigned char* dataOut, int imgHeight, int imgWidth, int* hThresh)
{
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
if (xIndex < imgWidth && yIndex < imgHeight)
{
if (dataIn[yIndex * imgWidth + xIndex] > hThresh[0])
{
dataOut[yIndex * imgWidth + xIndex] = 255;
}
else
{
// write 0 explicitly so the output buffer is fully defined
dataOut[yIndex * imgWidth + xIndex] = 0;
}
}
}
//implementation of Otsu thresholding without the OpenCV Otsu lib
// get the global best threshold (CPU reference implementation)
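// Otsu's method picks the threshold t that maximises the between-class variance
// sigma_B^2(t) = w0 * w1 * (u0 - u1)^2, which is exactly what the loop below
// evaluates for every candidate t.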
int getOstu(const Mat& in){
int rows = in.rows;
int cols = in.cols;
long size = rows * cols;
float histogram[256] = { 0 };
for (int i = 0; i < rows; ++i){
const uchar* p = in.ptr<uchar>(i);
for (int j = 0; j < cols; ++j){
histogram[int(*p++)]++;
}
}
int threshold;
//sum0: sum of the gray values of the foreground pixels
//sum1: sum of the gray values of the background pixels
long sum0 = 0, sum1 = 0;
//cnt0, cnt1: number of foreground / background pixels
long cnt0 = 0, cnt1 = 0;
//w0,w1: The proportion of the foreground gray scale pixels (0~i) and
//background gray scale pixels(i~255) to the whole image
double w0 = 0, w1 = 0;
//u0:Average gray level of the foreground pixel
//u1: Average gray level of the background pixel
double u0 = 0, u1 = 0;
double variance = 0;
double maxVariance = 0;
for (int i = 1; i < 256; i++) {
sum0 = 0;
sum1 = 0;
cnt0 = 0;
cnt1 = 0;
w0 = 0;
w1 = 0;
for (int j = 0; j < i; j++) {
cnt0 += histogram[j];
sum0 += j * histogram[j];
}
u0 = (double)sum0 / cnt0;
w0 = (double)cnt0 / size;
for (int j = i; j <= 255; j++){
cnt1 += histogram[j];
sum1 += j * histogram[j];
}
u1 = (double)sum1 / cnt1;
w1 = 1 - w0;
//Calculate the variance of foreground and background pixels
variance = w0 * w1 * (u0 - u1) * (u0 - u1);
if (variance > maxVariance){
maxVariance = variance;
threshold = i;
}
}
return threshold;
}
// get all files In the specified directory
void getAllFiles(string path, vector<string>& files, vector<string>& files_name)
{
//file handle
long hFile = 0;
//file information
struct _finddata_t fileinfo;
string p;
if ((hFile = _findfirst(p.assign(path).append("\\*").c_str(), &fileinfo)) != -1)
{
do
{
if ((fileinfo.attrib & _A_SUBDIR)) {
if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0) {
//files.push_back(p.assign(path).append("\\").append(fileinfo.name) );
getAllFiles(p.assign(path).append("\\").append(fileinfo.name), files, files_name);
}
}else{
files.push_back(p.assign(path).append("\\").append(fileinfo.name));
files_name.push_back(fileinfo.name);
}
} while (_findnext(hFile, &fileinfo) == 0);
_findclose(hFile);
}
}
int main()
{
//input gray scale image; change the image path for other resolutions
// define double-dimensional array with the result dealt with opencv in raw iamge
//Mat srcImg = imread("D:/project/image_segment_with_cuda/image/raw/2.jpg", 0);
vector<string> temp;
vector<string> files_name;
getAllFiles("D:\\project\\image_segment_with_cuda\\image\\raw", temp,files_name);
for (int i = 0; i < temp.size(); ++i){
cout << temp[i] << endl;
Mat srcImg = imread(temp[i], 0);
int imgHeight = srcImg.rows;
int imgWidth = srcImg.cols;
//0.implemention of OTSU binarization in CPU
double time_cpu = static_cast<double>(getTickCount());
int bestThreshold = getOstu(srcImg);
cout << "The best threshold is: " << bestThreshold << endl;
Mat otsuResultImage = Mat::zeros(imgHeight, imgWidth, CV_8UC1);
//Binary operation
for (int i = 0; i < imgHeight; i++) {
for (int j = 0; j < imgWidth; j++) {
if (srcImg.at<uchar>(i, j) > bestThreshold) {
otsuResultImage.at<uchar>(i, j) = 255;
}
else {
otsuResultImage.at<uchar>(i, j) = 0;
}
}
}
time_cpu = ((double)getTickCount() - time_cpu) / getTickFrequency();
cout << "The Run Time of OSTU Algorithm with cpu is :" << time_cpu * 1000 << "ms" << endl;
//1.implemention of OTSU binarization by OpenCv
double time_opencv = static_cast<double>(getTickCount());
Mat dstImg1;
//THRESH_OTSU: let OpenCV choose the optimal threshold with the Otsu algorithm (the THRESH_OTSU flag value is 8)
threshold(srcImg, dstImg1, 0, 255, THRESH_OTSU);
time_opencv = ((double)getTickCount() - time_opencv) / getTickFrequency();
cout << "The Run Time of OSTU algorithm with OpenCv is :" << time_opencv * 1000 << "ms" << endl;
//2.implemention of OTSU binarization by CUDA in GPU
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// CV_8UC1: single-channel, unsigned 8-bit image
Mat dstImg2(imgHeight, imgWidth, CV_8UC1, Scalar(0));
//create GPU memory
unsigned char* d_in;
int* d_hist;
cudaMalloc((void**)&d_in, imgHeight * imgWidth * sizeof(unsigned char));
cudaMalloc((void**)&d_hist, 256 * sizeof(int));
cudaMemset(d_hist, 0, 256 * sizeof(int)); // cudaMalloc does not zero memory; the histogram is built with atomicAdd
// transfer the gray scale image data from CPU(srcImg.data) to GPU(d_in)
cudaMemcpy(d_in, srcImg.data, imgHeight * imgWidth * sizeof(unsigned char), cudaMemcpyHostToDevice);
//block dimension:32*32
dim3 threadsPerBlock1(32, 32);
//grid dimension: round up so the whole image is covered
dim3 blocksPerGrid1((imgWidth + 32 - 1) / 32, (imgHeight + 32 - 1) / 32);
imhistInCuda << <blocksPerGrid1, threadsPerBlock1 >> > (d_in, d_hist, imgHeight, imgWidth);
float* d_sum;
float* d_s;
float* d_n;
float* d_val;
int* d_t;
cudaMalloc((void**)&d_sum, sizeof(float));
cudaMalloc((void**)&d_s, 256 * sizeof(float));
cudaMalloc((void**)&d_n, 256 * sizeof(float));
cudaMalloc((void**)&d_val, 256 * sizeof(float));
// zero the accumulators before the kernel adds into them with atomicAdd
cudaMemset(d_sum, 0, sizeof(float));
cudaMemset(d_s, 0, 256 * sizeof(float));
cudaMemset(d_n, 0, 256 * sizeof(float));
cudaMemset(d_val, 0, 256 * sizeof(float));
cudaMalloc((void**)&d_t, 2 * sizeof(int));
//define the maximum Between-Class variance calculation parallel specification, where 257 is 1 + 256,
//The first block is used to calculate the sum of the gray scale image,
//and the next 256 blocks are used to calculate the S and N corresponding to the 256 grayscales
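// For example, block b = 5 accumulates s[4] = sum_{j<5} j*hist[j] and
// n[4] = sum_{j<5} hist[j], i.e. the weighted sum and the pixel count of the
// class of gray levels 0..4, from which val[4] (the between-class variance for
// threshold 4) is then computed inside the kernel.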
dim3 threadsPerBlock2(256, 1);
dim3 blocksPerGrid2(257, 1);
OTSUthresh << <blocksPerGrid2, threadsPerBlock2 >> > (d_hist, d_sum, d_s, d_n, d_val, imgHeight, imgWidth, d_t);
unsigned char* d_out;
cudaMalloc((void**)&d_out, imgHeight * imgWidth * sizeof(unsigned char));
otsuInCuda << <blocksPerGrid1, threadsPerBlock1 >> > (d_in, d_out, imgHeight, imgWidth, d_t);
// copy the image thresholded with the Otsu method from GPU back to CPU
cudaMemcpy(dstImg2.data, d_out, imgHeight * imgWidth * sizeof(unsigned char), cudaMemcpyDeviceToHost);
// calculate the time consumption of program with CUDA in GPU
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout << "The Run Time of OSTU Algorithm with CUDA is :" << time << "ms" << endl;
cout << "Accelerative ratio between CUDA and Opencv is :" << time_opencv * 1000 / time << endl;
cout << "Accelerative ratio between CUDA and CPU program is :" << time_cpu * 1000 / time << endl;
cudaFree(d_in);
cudaFree(d_out);
cudaFree(d_hist);
cudaFree(d_sum);
cudaFree(d_s);
cudaFree(d_n);
cudaFree(d_val);
cudaFree(d_t);
// output the image after image segment
//imwrite("./image/result/2_cpu.jpg", otsuResultImage);
//imwrite("./image/result/2_opencv.jpg", dstImg1);
//imwrite("./image/result/2_cuda.jpg", dstImg2);
imwrite("./image/result/"+files_name[i]+"_cpu.jpg", otsuResultImage);
imwrite("./image/result/" + files_name[i] + "_opencv.jpg", dstImg1);
imwrite("./image/result/" + files_name[i] + "_cuda.jpg", dstImg2);
}
return 0;
}
|
b6e1c9bb70d7a454b8953afe10b1a18d20e7900a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "utils.h"
long NUM_ITERATIONS = 100;
long N = 5000;
#define BLOCK_SIZE 1024
void jacobi(double* u, double* f) {
int count = 0;
//int sum_not_changed = 0;
//float old_residual;
while (count < NUM_ITERATIONS) {
double* new_u = (double*) malloc(N * N * sizeof(double));
// start from a copy of u so the boundary values (never updated below) stay defined
memcpy(new_u, u, N * N * sizeof(double));
#pragma omp parallel for
for (int j = N - 2; j > 0; j--) {
for (int i = 1; i < N - 1; i++) {
double new_value = (f[i*N + j]/((N+1)*(N+1)) + u[(i-1)*N + j] + u[i*N + j-1] + u[(i+1)*N + j] + u[i*N + j+1]) / 4;
new_u[i*N + j] = new_value;
}
}
memcpy(u, new_u, N*N*sizeof(double));
free(new_u);
count++;
}
}
__global__ void jacobi_kernel(double* u, double* f, double* new_u, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N * N) return;
int i = idx / N, j = idx % N;
if (i > 0 && i < N - 1 && j > 0 && j < N - 1) {
// interior point: 5-point Jacobi update, h = 1/(N+1)
new_u[idx] = (f[idx]/((N+1)*(N+1)) + u[idx-N] + u[idx-1] + u[idx+N] + u[idx+1]) / 4;
} else {
new_u[idx] = u[idx]; // boundary point: keep the old value (avoids out-of-bounds neighbours)
}
}
void jacobi_gpu(double* u, double* f) {
// the kernel needs device memory: copy u and f to the GPU once up front
double *d_u, *d_f, *d_new_u;
hipMalloc((void**)&d_u, N * N * sizeof(double));
hipMalloc((void**)&d_f, N * N * sizeof(double));
hipMalloc((void**)&d_new_u, N * N * sizeof(double));
hipMemcpy(d_u, u, N * N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_f, f, N * N * sizeof(double), hipMemcpyHostToDevice);
// one thread per grid point, so the launch has to cover all N*N elements
int num_blocks = (int)((N * N + BLOCK_SIZE - 1) / BLOCK_SIZE);
for (int count = 0; count < NUM_ITERATIONS; count++) {
hipLaunchKernelGGL(( jacobi_kernel), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, d_u, d_f, d_new_u, N);
// the new iterate becomes the input of the next sweep
double* tmp = d_u; d_u = d_new_u; d_new_u = tmp;
}
hipDeviceSynchronize();
hipMemcpy(u, d_u, N * N * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_u); hipFree(d_f); hipFree(d_new_u);
}
int main(int argc, char** argv) {
double* u = (double*) malloc(N * N * sizeof(double));
double* f = (double*) malloc(N * N * sizeof(double));
for (long i = 0; i < N; i++) {
for (long j = 0; j < N; j++) {
u[i*N+j] = 0;
f[i*N+j] = 1;
}
}
Timer t;
t.tic();
jacobi(u, f);
printf("Serial Jacobi time: %f s\n", t.toc());
t.tic();
jacobi_gpu(u, f);
printf("GPU Jacobi time: %f s\n", t.toc());
free(u);
free(f);
return 0;
}
|
b6e1c9bb70d7a454b8953afe10b1a18d20e7900a.cu
|
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "utils.h"
long NUM_ITERATIONS = 100;
long N = 5000;
#define BLOCK_SIZE 1024
void jacobi(double* u, double* f) {
int count = 0;
//int sum_not_changed = 0;
//float old_residual;
while (count < NUM_ITERATIONS) {
double* new_u = (double*) malloc(N * N * sizeof(double));
// start from a copy of u so the boundary values (never updated below) stay defined
memcpy(new_u, u, N * N * sizeof(double));
#pragma omp parallel for
for (int j = N - 2; j > 0; j--) {
for (int i = 1; i < N - 1; i++) {
double new_value = (f[i*N + j]/((N+1)*(N+1)) + u[(i-1)*N + j] + u[i*N + j-1] + u[(i+1)*N + j] + u[i*N + j+1]) / 4;
new_u[i*N + j] = new_value;
}
}
memcpy(u, new_u, N*N*sizeof(double));
free(new_u);
count++;
}
}
__global__ void jacobi_kernel(double* u, double* f, double* new_u, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N * N) return;
int i = idx / N, j = idx % N;
if (i > 0 && i < N - 1 && j > 0 && j < N - 1) {
// interior point: 5-point Jacobi update, h = 1/(N+1)
new_u[idx] = (f[idx]/((N+1)*(N+1)) + u[idx-N] + u[idx-1] + u[idx+N] + u[idx+1]) / 4;
} else {
new_u[idx] = u[idx]; // boundary point: keep the old value (avoids out-of-bounds neighbours)
}
}
void jacobi_gpu(double* u, double* f) {
// the kernel needs device memory: copy u and f to the GPU once up front
double *d_u, *d_f, *d_new_u;
cudaMalloc((void**)&d_u, N * N * sizeof(double));
cudaMalloc((void**)&d_f, N * N * sizeof(double));
cudaMalloc((void**)&d_new_u, N * N * sizeof(double));
cudaMemcpy(d_u, u, N * N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_f, f, N * N * sizeof(double), cudaMemcpyHostToDevice);
// one thread per grid point, so the launch has to cover all N*N elements
int num_blocks = (int)((N * N + BLOCK_SIZE - 1) / BLOCK_SIZE);
for (int count = 0; count < NUM_ITERATIONS; count++) {
jacobi_kernel<<<num_blocks, BLOCK_SIZE>>>(d_u, d_f, d_new_u, N);
// the new iterate becomes the input of the next sweep
double* tmp = d_u; d_u = d_new_u; d_new_u = tmp;
}
cudaDeviceSynchronize();
cudaMemcpy(u, d_u, N * N * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_u); cudaFree(d_f); cudaFree(d_new_u);
}
int main(int argc, char** argv) {
double* u = (double*) malloc(N * N * sizeof(double));
double* f = (double*) malloc(N * N * sizeof(double));
for (long i = 0; i < N; i++) {
for (long j = 0; j < N; j++) {
u[i*N+j] = 0;
f[i*N+j] = 1;
}
}
Timer t;
t.tic();
jacobi(u, f);
printf("Serial Jacobi time: %f s\n", t.toc());
t.tic();
jacobi_gpu(u, f);
printf("GPU Jacobi time: %f s\n", t.toc());
free(u);
free(f);
return 0;
}
|
eb62c1d9547e85bb61e30e16e59a99f8890c9ed6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
#include <errno.h>
#include <unistd.h>
typedef struct point_t {
double x;
double g;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{72.77,118.72},{65.12,120.63},{82.02,129.32},{65.16,102.22},
{78.19,121.29},{76.29,135.67},{65.86,115.55},{75.69,130.88},
{72.44,116.34},{65.14,89.67},{80.03,124.97},{51.96,81.62},
{57.74,108.42},{52.02,95.88},{18.63,49.91},{16.71,39.35},
{45.07,83.26},{89.87,137.12},{86.91,149.83},{41.21,70.41},
{99.11,147.55},{69.25,113.96},{91.73,161.45},{72.98,121.41},
{22.08,62.76},{95.92,154.31},{87.00,120.18},{18.74,49.32},
{15.83,42.11},{25.46,45.34},{ 7.09,34.50},{97.59,136.62},
{13.02,23.82},{13.37,19.51},{55.68,88.74},{96.94,151.90},
{34.73,93.20},{73.04,114.74},{74.72,112.20},{51.31,85.62},
{51.50,91.89},{69.87,99.73},{71.73,106.84},{17.76,40.13},
{84.18,148.14},{11.67,29.20},{87.48,134.83},{36.26,63.61},
{30.26,61.37},{83.83,153.50},{57.07,102.15},{64.70,114.23},
{38.18,58.44},{25.20,39.48},{12.18,47.79},{94.70,147.53},
{53.10,93.77},{80.75,123.70},{19.61,43.95},{78.03,118.86},
{27.75,46.41},{63.42,108.27},{93.14,146.76},{19.63,52.94},
{90.27,160.99},{28.29,63.93},{95.08,153.72},{36.87,78.41},
{91.29,151.49},{43.37,66.62},{65.24,123.55},{12.16,33.46},
{39.98,63.36},{16.34,37.91},{69.46,102.03},{95.92,145.48},
{50.93,86.43},{46.12,71.51},{70.43,119.41},{48.68,90.83},
{96.18,136.95},{80.01,133.45},{93.99,148.44},{86.73,143.92},
{24.79,63.22},{45.72,75.40},{52.34,85.23},{46.86,72.18},
{60.15,93.41},{40.80,75.71},{66.56,101.48},{85.77,135.47},
{28.93,55.13},{63.90,114.17},{95.59,145.55},{94.73,138.09},
{70.88,105.41},{17.77,58.84},{67.20,111.37},{34.58,64.99},
{56.88,102.15},{91.00,142.02},{95.22,157.74},{54.68,98.89},
{68.77,91.31},{35.52,73.41},{60.71,99.62},{99.14,176.58},
{77.54,126.89},{36.67,86.90},{69.70,119.25},{81.02,124.29},
{ 9.38,26.68},{28.26,44.17},{ 7.49,15.60},{48.94,84.00},
{94.48,138.61},{26.64,63.54},{89.24,133.61},{83.37,141.18},
{71.00,128.40},{36.95,74.38},{27.94,62.41},{84.60,123.33},
{99.50,162.99},{11.97,33.10},{58.02,99.89},{25.84,51.64},
{15.39,41.81},{69.74,110.86},{98.98,144.25},{68.20,98.25},
{17.50,42.01},{39.30,71.80},{69.39,114.15},{63.86,92.52},
{70.46,134.11},{36.50,83.08},{57.23,93.67},{ 8.71,40.29},
{ 8.97,52.99},{27.47,57.04},{78.93,131.94},{ 6.03,41.87},
{43.61,83.41},{27.24,39.02},{86.95,144.67},{48.47,97.90},
{95.24,155.67},{83.88,122.06},{15.65,43.01},{14.93,42.21},
{92.78,131.25},{40.34,63.24},{89.05,134.79},{78.62,127.08},
{54.65,90.81},{60.35,72.27},{ 9.75,50.09},{36.13,62.37},
{35.18,48.53},{33.24,70.35},{56.99,96.10},{76.54,112.51},
{44.16,64.34},{27.41,53.63},{ 2.44,41.93},{25.25,40.04},
{67.46,115.47},{97.90,138.04},{83.51,145.06},{33.03,70.04},
{76.13,119.74},{10.50,34.00},{36.33,60.49},{44.27,96.92},
{61.26,107.65},{20.36,35.83},{49.88,85.51},{ 2.17,29.37},
{13.59,37.75},{32.89,54.31},{22.20,57.99},{41.84,59.05},
{23.26,61.40},{20.72,45.21},{16.72,37.15},{70.61,112.23},
{44.86,79.91},{80.83,115.30},{64.70,90.04},{ 4.65,35.57},
{77.96,128.33},{54.90,93.28},{60.22,94.44},{95.13,167.10},
{22.57,52.83},{38.99,76.88},{49.95,89.09},{81.31,137.80},
{ 4.96,32.73},{ 4.73,30.87},{ 2.18,17.67},{26.25,55.79},
{91.63,142.63},{74.46,118.35},{23.52,42.10},{58.17,105.26},
{16.26,38.65},{67.73,113.45},{65.71,116.24},{76.55,127.74},
{69.59,108.29},{60.06,97.91},{51.00,91.43},{18.86,35.61},
{59.47,92.46},{31.02,50.93},{78.42,136.49},{67.16,94.95},
{64.21,99.02},{17.65,47.04},{45.07,78.27},{69.62,88.91},
{14.43,37.19},{34.33,59.98},{22.66,51.53},{73.59,110.37},
{40.41,96.62},{31.06,72.43},{ 1.26,38.12},{46.15,91.49},
{38.87,71.54},{76.87,128.61},{52.82,96.88},{70.35,125.66},
{22.94,57.24},{81.61,135.29},{54.83,103.52},{46.73,66.55},
{80.65,110.88},{29.22,43.41},{68.57,102.69},{92.48,139.96},
{23.59,62.61},{83.88,126.12},{69.23,130.49},{37.82,71.05},
{41.81,90.44},{34.44,78.54},{37.84,65.29},{51.34,80.36},
{34.70,60.85},{74.14,114.66},{48.00,74.10},{ 1.77,18.11},
{73.91,130.51},{73.29,121.01},{ 1.27,32.31},{61.05,110.64},
{17.38,63.17},{17.98,61.02},{54.30,82.75},{70.69,97.29},
{59.30,100.05},{90.88,139.05},{15.43,52.78},{96.78,147.65},
{21.30,39.81},{79.81,121.41},{38.28,73.76},{20.73,51.30},
{13.78,31.65},{55.67,85.83},{55.06,100.61},{14.51,42.89},
{64.30,120.85},{36.82,71.36},{88.05,124.00},{29.79,74.65},
{27.70,53.41},{60.18,97.74},{68.63,102.01},{42.71,73.09},
{85.65,135.12},{21.27,57.41},{32.14,63.55},{88.28,144.64},
{35.10,79.33},{79.19,135.53},{69.09,130.93},{80.74,114.49},
{20.03,58.62},{32.62,55.72},{39.17,66.34},{60.56,102.04},
{65.24,115.02},{90.95,134.48},{71.19,121.85},{48.60,97.96},
{52.33,103.70},{93.05,158.29},{94.38,131.31},{98.18,155.10},
{20.17,47.78},{59.19,98.88},{46.42,79.33},{97.42,125.48},
{11.28,20.62},{98.25,140.63},{46.18,77.29},{55.52,96.91},
{50.82,116.03},{69.93,113.02},{76.44,127.80},{36.29,88.58},
{60.80,93.42},{ 0.99,20.91},{10.83,42.19},{ 5.24,31.77},
{67.66,106.16},{45.93,77.40},{ 2.26,29.07},{98.04,147.89},
{90.10,135.25},{34.34,70.72},{44.56,87.52},{83.69,128.02},
{16.35,44.24},{76.93,138.95},{32.46,62.19},{40.16,72.16},
{94.03,137.62},{44.21,81.22},{ 4.64,47.68},{74.71,122.75},
{41.17,81.58},{59.93,100.10},{17.80,42.92},{ 5.50,22.20},
{21.83,69.99},{52.17,99.26},{71.31,102.54},{22.02,44.93},
{22.89,61.84},{69.86,110.34},{ 7.86,24.40},{56.34,108.23},
{ 2.60,19.78},{82.29,122.53},{51.94,86.58},{20.28,60.66},
{44.89,84.64},{10.41,44.07},{35.87,69.06},{58.90,79.11},
{89.92,145.02},{93.09,147.95},{23.14,46.83},{56.76,100.89},
{35.95,70.50},{30.48,69.42},{48.71,71.49},{97.69,137.11},
{59.72,107.91},{39.70,80.98},{14.40,47.72},{55.73,91.84},
{22.66,50.61},{78.85,129.26},{55.15,98.11},{10.71,47.03},
{75.86,112.22},{64.20,110.91},{91.53,127.51},{34.89,63.14},
{82.21,117.14},{65.41,107.56},{25.29,75.41},{73.35,112.65},
{26.50,56.75},{36.86,56.62},{12.33,41.98},{92.04,149.78},
{25.86,43.64},{ 0.44,16.90},{19.23,54.09},{52.87,74.58},
{62.83,110.16},{56.27,88.79},{ 0.33,19.72},{25.14,60.01},
{68.48,130.12},{22.68,62.09},{97.28,150.76},{24.85,55.60},
{62.24,98.99},{99.80,153.78},{26.90,56.52},{79.04,115.67},
{ 1.13,15.33},{78.40,128.23},{81.23,125.23},{53.04,82.26},
{49.61,97.91},{15.08,45.92},{55.36,96.35},{47.91,76.86},
{82.93,132.38},{61.56,106.42},{67.81,111.09},{55.63,95.55},
{33.79,68.29},{86.66,136.26},{16.89,42.73},{24.83,57.62},
{12.90,40.69},{78.39,111.62},{58.90,96.35},{11.73,24.90},
{44.24,91.95},{31.47,64.50},{58.61,94.69},{15.70,35.23},
{27.52,67.16},{72.20,128.90},{15.96,32.80},{32.14,58.48},
{73.06,109.88},{82.47,127.54},{33.99,61.17},{17.27,42.44},
{19.18,49.35},{54.73,97.67},{57.00,94.35},{96.99,146.07},
{33.25,54.37},{90.92,141.06},{83.68,140.12},{88.50,147.82},
{60.44,91.05},{22.79,35.45},{ 0.20,34.58},{98.92,182.16},
{12.39,35.74},{74.98,131.33},{82.47,118.27},{11.09,34.28},
{10.56,25.61},{31.71,71.95},{33.29,74.89},{28.68,69.04},
{21.93,55.87},{69.64,105.79},{ 8.24,19.17},{76.62,131.99},
{32.77,67.13},{24.12,48.72},{77.22,112.93},{61.01,105.01},
{27.03,46.38},{22.86,37.93},{35.18,67.40},{38.25,66.98},
{60.39,80.99},{40.56,89.15},{ 3.93,12.91},{41.04,100.99},
{53.50,88.59},{44.18,61.32},{43.25,94.15},{ 9.83,35.99},
{85.62,134.98},{39.21,60.05},{54.46,99.44},{33.12,67.91},
{92.79,149.50},{97.52,143.02},{75.39,102.58},{62.25,78.02},
{77.98,129.84},{91.33,122.16},{95.69,144.90},{68.03,111.21},
{ 6.12,11.48},{28.17,45.95},{33.43,61.77},{17.10,31.09},
{84.96,132.10},{51.07,88.83},{76.25,132.04},{99.05,166.96},
{18.00,44.95},{19.70,52.96},{59.94,94.02},{15.82,48.84},
{66.19,115.66},{71.67,114.24},{14.15,33.93},{ 3.76,31.67},
{52.16,91.69},{51.76,88.38},{97.58,138.34},{55.60,101.85},
{97.02,144.97},{ 7.93, 9.12},{98.75,150.62},{ 3.50,21.44},
{96.65,134.11},{26.35,51.85},{97.00,146.85},{ 7.37,25.50},
{35.90,66.20},{50.50,89.54},{68.38,124.33},{50.23,106.00},
{46.78,89.52},{86.89,142.92},{96.78,155.91},{96.50,147.83},
{32.54,34.84},{92.16,138.84},{56.31,85.81},{12.32,39.66},
{80.16,116.45},{ 1.79,26.05},{ 3.41,28.04},{ 4.84,31.77},
{ 7.98,41.72},{88.53,150.86},{93.77,137.93},{51.48,92.98},
{33.53,64.20},{11.06,39.65},{39.01,64.17},{13.85,31.53},
{53.81,108.88},{98.09,147.25},{13.53,54.31},{17.99,35.17},
{67.32,111.08},{24.75,41.85},{41.95,77.57},{12.95,37.28},
{20.99,56.58},{68.55,107.85},{71.77,111.31},{64.49,98.94},
{ 3.55,23.81},{81.75,138.28},{88.68,132.28},{55.92,94.76},
{ 9.87,45.56},{ 0.39,18.48},{10.73,43.80},{48.28,87.56},
{53.14,63.43},{66.24,94.22},{40.82,65.66},{30.59,64.12},
{72.07,118.91},{86.42,124.38},{64.83,96.69},{31.56,67.90},
{53.72,82.97},{46.19,77.73},{38.63,88.90},{72.31,118.74},
{98.13,156.96},{85.32,140.11},{38.76,67.08},{ 6.23,20.83},
{41.90,60.27},{69.32,113.81},{95.46,157.47},{51.02,80.72},
{ 9.47,42.07},{17.83,40.39},{91.78,139.74},{23.75,61.08},
{11.72,38.13},{66.67,113.10},{55.44,95.63},{62.31,107.67},
{62.13,105.25},{42.96,72.97},{44.26,73.67},{75.51,117.92},
{35.63,59.92},{91.88,138.01},{74.63,131.13},{43.69,84.98},
{25.90,46.74},{82.85,138.29},{34.55,52.01},{99.55,149.24},
{15.79,43.53},{95.86,135.97},{62.70,111.12},{38.35,80.78},
{ 5.29,39.78},{58.23,99.45},{59.68,113.93},{78.77,127.72},
{49.16,81.76},{56.55,97.19},{52.21,87.98},{55.96,114.67},
{97.15,146.64},{94.01,151.93},{43.31,79.87},{34.16,56.68},
{28.83,59.31},{19.83,47.01},{10.21,38.64},{79.72,109.91},
{10.46,32.97},{28.94,57.52},{68.62,116.51},{ 6.84,24.44},
{15.52,46.41},{31.43,66.17},{70.64,119.01},{70.24,116.63},
{12.92,27.90},{49.57,75.40},{ 9.32,23.11},{51.67,93.68},
{ 3.98,32.19},{90.57,146.42},{17.60,48.24},{47.50,90.41},
{37.79,73.89},{42.77,91.83},{34.35,59.30},{99.78,158.47},
{75.98,128.12},{62.95,113.09},{97.16,152.02},{13.11,35.42},
{ 4.08,28.40},{57.40,94.01},{79.93,127.87},{68.57,110.25},
{10.90,36.33},{19.81,49.12},{86.06,119.99},{46.20,72.35},
{32.82,63.84},{ 4.93,25.19},{15.85,44.91},{20.04,51.73},
{86.56,141.89},{87.55,150.74},{35.34,79.65},{27.83,56.11},
{60.92,81.19},{25.61,53.37},{26.40,53.64},{86.86,124.99},
{ 7.29,40.82},{35.77,66.45},{18.39,64.97},{38.48,73.61},
{21.30,62.41},{24.25,39.91},{11.60,48.43},{11.48,39.85},
{94.78,131.88},{55.63,85.73},{81.29,135.01},{64.18,99.29},
{60.66,99.84},{13.13,32.77},{26.02,46.59},{11.15,49.39},
{92.35,139.92},{24.36,49.16},{14.54,16.82},{42.96,74.67},
{86.32,138.83},{68.68,108.94},{44.38,74.30},{49.13,85.12},
{36.17,77.52},{17.40,24.57},{20.68,40.13},{79.41,137.20},
{36.50,77.99},{ 5.68,24.48},{42.76,76.17},{19.46,35.54},
{ 1.08,26.36},{61.10,111.34},{42.43,69.87},{73.41,112.01},
{63.22,93.20},{86.54,148.56},{34.81,84.76},{60.56,120.24},
{18.57,61.75},{64.52,106.19},{84.26,125.45},{29.06,46.85},
{10.50,63.08},{61.35,115.87},{97.00,132.02},{54.89,106.64},
{60.02,100.90},{52.48,87.21},{18.39,45.96},{ 8.48,23.90},
{26.39,60.66},{ 9.06,32.27},{66.50,121.18},{57.74,94.70},
{25.66,65.11},{ 4.72,34.54},{98.24,135.21},{74.73,108.60},
{ 6.56,22.62},{59.14,90.97},{63.28,97.68},{62.08,105.43},
{ 3.32,31.89},{40.85,61.30},{64.77,114.54},{79.90,124.93},
{48.62,93.82},{39.18,62.48},{68.52,107.87},{17.21,50.66},
{96.98,147.20},{83.15,149.78},{62.95,96.44},{39.43,81.83},
{35.05,69.53},{97.00,130.77},{65.74,116.67},{59.47,108.46},
{99.79,143.53},{74.41,127.93},{49.23,79.84},{48.09,79.68},
{24.35,39.55},{64.06,102.73},{30.43,62.26},{35.18,76.80},
{35.97,72.38},{67.10,123.28},{ 5.39,28.04},{50.42,71.75},
{66.24,107.42},{62.68,98.82},{99.83,145.11},{81.83,158.11},
{11.95,45.17},{87.93,136.10},{94.07,142.53},{17.22,31.52},
{52.86,84.57},{ 7.54,31.19},{ 1.89,24.77},{32.53,63.64},
{ 4.01,25.49},{76.42,131.15},{29.62,79.68},{ 2.00,23.82},
{65.14,78.12},{21.85,55.01},{31.48,65.68},{49.86,82.59},
{12.83,45.28},{16.84,35.94},{86.08,136.94},{96.06,141.05},
{53.87,93.27},{83.51,133.44},{ 1.66,31.46},{95.55,137.01},
{17.86,53.11},{57.50,105.66},{53.71,86.03},{46.89,88.06},
{23.18,50.81},{86.92,130.30},{45.23,79.77},{72.64,104.64},
{31.12,54.10},{18.39,52.72},{ 0.05,30.71},{42.07,72.84},
{84.06,130.70},{47.92,72.23},{40.04,64.11},{31.22,46.99},
{50.19,91.88},{76.17,112.30},{58.03,92.31},{61.65,82.10},
{83.77,141.17},{ 7.64,33.09},{61.59,111.34},{87.63,134.22},
{ 5.70,32.06},{17.96,44.92},{82.95,138.52},{55.38,101.67},
{59.98,108.53},{39.15,83.49},{43.79,66.94},{38.95,76.62},
{13.00,33.96},{92.12,146.59},{33.69,54.26},{31.77,70.51},
{76.01,119.01},{89.04,112.33},{81.66,120.53},{26.01,70.28},
{50.63,97.30},{90.34,145.37},{53.87,91.36},{52.47,94.66},
{72.14,102.84},{58.85,99.75},{91.94,149.34},{58.00,104.40},
{19.38,48.33},{68.36,108.16},{65.65,105.51},{13.09,48.61},
{89.62,158.28},{76.18,110.95},{62.50,103.82},{26.82,57.27},
{29.22,66.26},{82.88,124.08},{41.74,87.98},{43.19,76.66},
{17.91,27.98},{27.89,60.05},{62.38,107.68},{63.27,113.16},
{ 5.44,25.32},{67.67,114.17},{ 7.12,36.77},{74.81,136.62},
{76.88,112.53},{89.74,150.27},{ 6.39,20.44},{22.51,50.76},
{94.03,138.47},{76.74,108.17},{ 7.04,29.72},{14.36,38.43},
{24.51,60.54},{82.24,130.52},{69.94,112.04},{28.69,76.99},
{14.45,50.79},{73.87,115.32},{25.68,45.81},{50.07,102.88},
{73.78,116.08},{11.07,20.97},{65.56,100.87},{14.48,45.08},
{83.19,122.32},{47.87,80.93},{24.48,39.61},{23.33,55.38},
{32.35,50.84},{36.16,82.82},{28.34,55.84},{11.59,28.66},
{ 0.40,26.06},{66.88,111.04},{69.58,113.59},{93.99,145.56},
{78.13,109.50},{29.81,57.28},{23.77,59.59},{70.49,98.18},
{44.95,98.42},{20.51,28.11},{25.98,60.82},{82.15,129.63},
{28.77,55.51},{66.78,102.42},{75.42,129.50},{23.39,64.67},
{76.55,110.56},{14.19,36.25},{33.00,51.26},{80.36,116.31},
{46.74,72.04},{25.14,51.22},{42.25,83.65},{84.13,153.10},
{80.50,123.69},{88.97,137.08},{95.68,149.98},{90.76,141.42},
{68.80,116.88},{23.59,34.43},{42.86,75.48},{65.71,105.58},
{78.32,123.27},{66.13,104.58},{ 1.34,42.60},{30.11,69.76},
{90.65,127.59},{34.78,69.18},{29.77,77.93},{38.36,66.87},
{92.92,147.24},{35.69,93.35},{62.34,95.76},{74.98,124.79},
{87.69,149.64},{20.97,42.04},{34.13,66.90},{36.64,71.45},
{51.59,89.00},{33.67,44.32},{16.69,39.25},{21.13,57.54},
{62.53,117.08},{37.14,70.64},{32.77,48.50},{75.61,118.15},
{33.30,70.29},{79.86,125.99},{75.59,128.36},{98.53,154.32},
{81.91,157.72},{22.40,49.18},{50.96,75.34},{60.90,114.22},
{10.56,54.60},{52.07,95.27},{43.17,82.10},{50.66,78.40},
{92.35,159.07},{95.95,159.33},{54.10,108.13},{38.65,81.78},
{60.38,95.30},{94.44,149.00},{37.09,56.81},{ 0.96,46.94},
{41.82,86.77},{38.02,60.62},{32.91,57.15},{43.08,78.34},
{16.36,45.02},{66.52,103.37},{13.53,54.06},{61.65,104.07},
{22.59,39.62},{17.16,60.07},{89.26,134.35},{36.66,64.65},
{27.42,54.52},{78.54,119.20},{74.01,117.82},{15.74,42.28},
{18.50,49.44},{26.40,53.02},{20.80,22.16},{79.34,141.43},
{91.51,144.71},{94.60,154.66},{16.13,47.29},{ 0.48,27.16},
{89.43,142.53},{ 6.85,36.89},{ 0.45,22.73},{ 5.81,30.04},
{36.10,53.96},{90.42,133.09},{92.77,147.72},{ 6.41,36.37},
{76.45,129.76},{75.49,116.43},{50.33,91.69},{67.94,109.11},
{ 2.25,17.10},{17.69,51.56},{52.66,81.55},{36.65,65.41},
{95.03,122.46},{43.45,65.97},{87.86,125.81},{16.87,28.76},
{12.43,27.92},{25.52,48.11},{96.43,152.85},{43.94,79.85}
};
double residual_error(double x, double g, double m, double c) {
double e = (m * x) + c - g;
return e * e;
}
__device__ double d_residual_error(double x, double g, double m, double c) {
double e = (m * x) + c - g;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].g, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].g, *m, *c);
}
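// The host code launches this with 100 blocks of 10 threads, i.e. exactly
// n_data = 1000 threads, one per data point, so no bounds check is needed here.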
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
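// om/oc are the 8 unit steps (the compass directions) tried around the current
// (bm, bc) estimate; each iteration moves to the best neighbour and the search
// stops once no neighbour improves the RMS error.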
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be = rms_error(bm, bc);
error = hipMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "hipMalloc on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "hipMalloc on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i] = bc + (oc[i] * step);
}
error = hipMemcpy(d_dm, dm, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dm returned %d %s\n", error,
hipGetErrorString(error));
}
error = hipMemcpy(d_dc, dc, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dc returned %d %s\n", error,
hipGetErrorString(error));
}
error = hipMemcpy(d_data, data, sizeof(data), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_data returned %d %s\n", error,
hipGetErrorString(error));
}
for(i=0;i<8;i++) {
double h_error_sum_arr[1000];
double error_sum_total = 0;
double error_sum_mean;
hipLaunchKernelGGL(( d_rms_error) , dim3(100),dim3(10), 0, 0, &d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
hipDeviceSynchronize();
error = hipMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), hipMemcpyDeviceToHost);
if(error){
fprintf(stderr, "hipMemcpy to error_sum returned %d %s\n", error,
hipGetErrorString(error));
}
for(int j=0; j<n_data; j++) {
error_sum_total += h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / n_data;
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error) {
best_error = e[i];
best_error_i = i;
}
error_sum_total = 0;
}
if(best_error < be) {
be = best_error;
bm = dm[best_error_i];
bc = dc[best_error_i];
} else {
minimum_found = 1;
}
}
error = hipFree(d_dm);
if(error){
fprintf(stderr, "hipFree on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_dc);
if(error){
fprintf(stderr, "hipFree on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_data);
if(error){
fprintf(stderr, "hipFree on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_error_sum_arr);
if(error){
fprintf(stderr, "hipFree on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
eb62c1d9547e85bb61e30e16e59a99f8890c9ed6.cu
|
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
typedef struct point_t {
double x;
double g;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{72.77,118.72},{65.12,120.63},{82.02,129.32},{65.16,102.22},
{78.19,121.29},{76.29,135.67},{65.86,115.55},{75.69,130.88},
{72.44,116.34},{65.14,89.67},{80.03,124.97},{51.96,81.62},
{57.74,108.42},{52.02,95.88},{18.63,49.91},{16.71,39.35},
{45.07,83.26},{89.87,137.12},{86.91,149.83},{41.21,70.41},
{99.11,147.55},{69.25,113.96},{91.73,161.45},{72.98,121.41},
{22.08,62.76},{95.92,154.31},{87.00,120.18},{18.74,49.32},
{15.83,42.11},{25.46,45.34},{ 7.09,34.50},{97.59,136.62},
{13.02,23.82},{13.37,19.51},{55.68,88.74},{96.94,151.90},
{34.73,93.20},{73.04,114.74},{74.72,112.20},{51.31,85.62},
{51.50,91.89},{69.87,99.73},{71.73,106.84},{17.76,40.13},
{84.18,148.14},{11.67,29.20},{87.48,134.83},{36.26,63.61},
{30.26,61.37},{83.83,153.50},{57.07,102.15},{64.70,114.23},
{38.18,58.44},{25.20,39.48},{12.18,47.79},{94.70,147.53},
{53.10,93.77},{80.75,123.70},{19.61,43.95},{78.03,118.86},
{27.75,46.41},{63.42,108.27},{93.14,146.76},{19.63,52.94},
{90.27,160.99},{28.29,63.93},{95.08,153.72},{36.87,78.41},
{91.29,151.49},{43.37,66.62},{65.24,123.55},{12.16,33.46},
{39.98,63.36},{16.34,37.91},{69.46,102.03},{95.92,145.48},
{50.93,86.43},{46.12,71.51},{70.43,119.41},{48.68,90.83},
{96.18,136.95},{80.01,133.45},{93.99,148.44},{86.73,143.92},
{24.79,63.22},{45.72,75.40},{52.34,85.23},{46.86,72.18},
{60.15,93.41},{40.80,75.71},{66.56,101.48},{85.77,135.47},
{28.93,55.13},{63.90,114.17},{95.59,145.55},{94.73,138.09},
{70.88,105.41},{17.77,58.84},{67.20,111.37},{34.58,64.99},
{56.88,102.15},{91.00,142.02},{95.22,157.74},{54.68,98.89},
{68.77,91.31},{35.52,73.41},{60.71,99.62},{99.14,176.58},
{77.54,126.89},{36.67,86.90},{69.70,119.25},{81.02,124.29},
{ 9.38,26.68},{28.26,44.17},{ 7.49,15.60},{48.94,84.00},
{94.48,138.61},{26.64,63.54},{89.24,133.61},{83.37,141.18},
{71.00,128.40},{36.95,74.38},{27.94,62.41},{84.60,123.33},
{99.50,162.99},{11.97,33.10},{58.02,99.89},{25.84,51.64},
{15.39,41.81},{69.74,110.86},{98.98,144.25},{68.20,98.25},
{17.50,42.01},{39.30,71.80},{69.39,114.15},{63.86,92.52},
{70.46,134.11},{36.50,83.08},{57.23,93.67},{ 8.71,40.29},
{ 8.97,52.99},{27.47,57.04},{78.93,131.94},{ 6.03,41.87},
{43.61,83.41},{27.24,39.02},{86.95,144.67},{48.47,97.90},
{95.24,155.67},{83.88,122.06},{15.65,43.01},{14.93,42.21},
{92.78,131.25},{40.34,63.24},{89.05,134.79},{78.62,127.08},
{54.65,90.81},{60.35,72.27},{ 9.75,50.09},{36.13,62.37},
{35.18,48.53},{33.24,70.35},{56.99,96.10},{76.54,112.51},
{44.16,64.34},{27.41,53.63},{ 2.44,41.93},{25.25,40.04},
{67.46,115.47},{97.90,138.04},{83.51,145.06},{33.03,70.04},
{76.13,119.74},{10.50,34.00},{36.33,60.49},{44.27,96.92},
{61.26,107.65},{20.36,35.83},{49.88,85.51},{ 2.17,29.37},
{13.59,37.75},{32.89,54.31},{22.20,57.99},{41.84,59.05},
{23.26,61.40},{20.72,45.21},{16.72,37.15},{70.61,112.23},
{44.86,79.91},{80.83,115.30},{64.70,90.04},{ 4.65,35.57},
{77.96,128.33},{54.90,93.28},{60.22,94.44},{95.13,167.10},
{22.57,52.83},{38.99,76.88},{49.95,89.09},{81.31,137.80},
{ 4.96,32.73},{ 4.73,30.87},{ 2.18,17.67},{26.25,55.79},
{91.63,142.63},{74.46,118.35},{23.52,42.10},{58.17,105.26},
{16.26,38.65},{67.73,113.45},{65.71,116.24},{76.55,127.74},
{69.59,108.29},{60.06,97.91},{51.00,91.43},{18.86,35.61},
{59.47,92.46},{31.02,50.93},{78.42,136.49},{67.16,94.95},
{64.21,99.02},{17.65,47.04},{45.07,78.27},{69.62,88.91},
{14.43,37.19},{34.33,59.98},{22.66,51.53},{73.59,110.37},
{40.41,96.62},{31.06,72.43},{ 1.26,38.12},{46.15,91.49},
{38.87,71.54},{76.87,128.61},{52.82,96.88},{70.35,125.66},
{22.94,57.24},{81.61,135.29},{54.83,103.52},{46.73,66.55},
{80.65,110.88},{29.22,43.41},{68.57,102.69},{92.48,139.96},
{23.59,62.61},{83.88,126.12},{69.23,130.49},{37.82,71.05},
{41.81,90.44},{34.44,78.54},{37.84,65.29},{51.34,80.36},
{34.70,60.85},{74.14,114.66},{48.00,74.10},{ 1.77,18.11},
{73.91,130.51},{73.29,121.01},{ 1.27,32.31},{61.05,110.64},
{17.38,63.17},{17.98,61.02},{54.30,82.75},{70.69,97.29},
{59.30,100.05},{90.88,139.05},{15.43,52.78},{96.78,147.65},
{21.30,39.81},{79.81,121.41},{38.28,73.76},{20.73,51.30},
{13.78,31.65},{55.67,85.83},{55.06,100.61},{14.51,42.89},
{64.30,120.85},{36.82,71.36},{88.05,124.00},{29.79,74.65},
{27.70,53.41},{60.18,97.74},{68.63,102.01},{42.71,73.09},
{85.65,135.12},{21.27,57.41},{32.14,63.55},{88.28,144.64},
{35.10,79.33},{79.19,135.53},{69.09,130.93},{80.74,114.49},
{20.03,58.62},{32.62,55.72},{39.17,66.34},{60.56,102.04},
{65.24,115.02},{90.95,134.48},{71.19,121.85},{48.60,97.96},
{52.33,103.70},{93.05,158.29},{94.38,131.31},{98.18,155.10},
{20.17,47.78},{59.19,98.88},{46.42,79.33},{97.42,125.48},
{11.28,20.62},{98.25,140.63},{46.18,77.29},{55.52,96.91},
{50.82,116.03},{69.93,113.02},{76.44,127.80},{36.29,88.58},
{60.80,93.42},{ 0.99,20.91},{10.83,42.19},{ 5.24,31.77},
{67.66,106.16},{45.93,77.40},{ 2.26,29.07},{98.04,147.89},
{90.10,135.25},{34.34,70.72},{44.56,87.52},{83.69,128.02},
{16.35,44.24},{76.93,138.95},{32.46,62.19},{40.16,72.16},
{94.03,137.62},{44.21,81.22},{ 4.64,47.68},{74.71,122.75},
{41.17,81.58},{59.93,100.10},{17.80,42.92},{ 5.50,22.20},
{21.83,69.99},{52.17,99.26},{71.31,102.54},{22.02,44.93},
{22.89,61.84},{69.86,110.34},{ 7.86,24.40},{56.34,108.23},
{ 2.60,19.78},{82.29,122.53},{51.94,86.58},{20.28,60.66},
{44.89,84.64},{10.41,44.07},{35.87,69.06},{58.90,79.11},
{89.92,145.02},{93.09,147.95},{23.14,46.83},{56.76,100.89},
{35.95,70.50},{30.48,69.42},{48.71,71.49},{97.69,137.11},
{59.72,107.91},{39.70,80.98},{14.40,47.72},{55.73,91.84},
{22.66,50.61},{78.85,129.26},{55.15,98.11},{10.71,47.03},
{75.86,112.22},{64.20,110.91},{91.53,127.51},{34.89,63.14},
{82.21,117.14},{65.41,107.56},{25.29,75.41},{73.35,112.65},
{26.50,56.75},{36.86,56.62},{12.33,41.98},{92.04,149.78},
{25.86,43.64},{ 0.44,16.90},{19.23,54.09},{52.87,74.58},
{62.83,110.16},{56.27,88.79},{ 0.33,19.72},{25.14,60.01},
{68.48,130.12},{22.68,62.09},{97.28,150.76},{24.85,55.60},
{62.24,98.99},{99.80,153.78},{26.90,56.52},{79.04,115.67},
{ 1.13,15.33},{78.40,128.23},{81.23,125.23},{53.04,82.26},
{49.61,97.91},{15.08,45.92},{55.36,96.35},{47.91,76.86},
{82.93,132.38},{61.56,106.42},{67.81,111.09},{55.63,95.55},
{33.79,68.29},{86.66,136.26},{16.89,42.73},{24.83,57.62},
{12.90,40.69},{78.39,111.62},{58.90,96.35},{11.73,24.90},
{44.24,91.95},{31.47,64.50},{58.61,94.69},{15.70,35.23},
{27.52,67.16},{72.20,128.90},{15.96,32.80},{32.14,58.48},
{73.06,109.88},{82.47,127.54},{33.99,61.17},{17.27,42.44},
{19.18,49.35},{54.73,97.67},{57.00,94.35},{96.99,146.07},
{33.25,54.37},{90.92,141.06},{83.68,140.12},{88.50,147.82},
{60.44,91.05},{22.79,35.45},{ 0.20,34.58},{98.92,182.16},
{12.39,35.74},{74.98,131.33},{82.47,118.27},{11.09,34.28},
{10.56,25.61},{31.71,71.95},{33.29,74.89},{28.68,69.04},
{21.93,55.87},{69.64,105.79},{ 8.24,19.17},{76.62,131.99},
{32.77,67.13},{24.12,48.72},{77.22,112.93},{61.01,105.01},
{27.03,46.38},{22.86,37.93},{35.18,67.40},{38.25,66.98},
{60.39,80.99},{40.56,89.15},{ 3.93,12.91},{41.04,100.99},
{53.50,88.59},{44.18,61.32},{43.25,94.15},{ 9.83,35.99},
{85.62,134.98},{39.21,60.05},{54.46,99.44},{33.12,67.91},
{92.79,149.50},{97.52,143.02},{75.39,102.58},{62.25,78.02},
{77.98,129.84},{91.33,122.16},{95.69,144.90},{68.03,111.21},
{ 6.12,11.48},{28.17,45.95},{33.43,61.77},{17.10,31.09},
{84.96,132.10},{51.07,88.83},{76.25,132.04},{99.05,166.96},
{18.00,44.95},{19.70,52.96},{59.94,94.02},{15.82,48.84},
{66.19,115.66},{71.67,114.24},{14.15,33.93},{ 3.76,31.67},
{52.16,91.69},{51.76,88.38},{97.58,138.34},{55.60,101.85},
{97.02,144.97},{ 7.93, 9.12},{98.75,150.62},{ 3.50,21.44},
{96.65,134.11},{26.35,51.85},{97.00,146.85},{ 7.37,25.50},
{35.90,66.20},{50.50,89.54},{68.38,124.33},{50.23,106.00},
{46.78,89.52},{86.89,142.92},{96.78,155.91},{96.50,147.83},
{32.54,34.84},{92.16,138.84},{56.31,85.81},{12.32,39.66},
{80.16,116.45},{ 1.79,26.05},{ 3.41,28.04},{ 4.84,31.77},
{ 7.98,41.72},{88.53,150.86},{93.77,137.93},{51.48,92.98},
{33.53,64.20},{11.06,39.65},{39.01,64.17},{13.85,31.53},
{53.81,108.88},{98.09,147.25},{13.53,54.31},{17.99,35.17},
{67.32,111.08},{24.75,41.85},{41.95,77.57},{12.95,37.28},
{20.99,56.58},{68.55,107.85},{71.77,111.31},{64.49,98.94},
{ 3.55,23.81},{81.75,138.28},{88.68,132.28},{55.92,94.76},
{ 9.87,45.56},{ 0.39,18.48},{10.73,43.80},{48.28,87.56},
{53.14,63.43},{66.24,94.22},{40.82,65.66},{30.59,64.12},
{72.07,118.91},{86.42,124.38},{64.83,96.69},{31.56,67.90},
{53.72,82.97},{46.19,77.73},{38.63,88.90},{72.31,118.74},
{98.13,156.96},{85.32,140.11},{38.76,67.08},{ 6.23,20.83},
{41.90,60.27},{69.32,113.81},{95.46,157.47},{51.02,80.72},
{ 9.47,42.07},{17.83,40.39},{91.78,139.74},{23.75,61.08},
{11.72,38.13},{66.67,113.10},{55.44,95.63},{62.31,107.67},
{62.13,105.25},{42.96,72.97},{44.26,73.67},{75.51,117.92},
{35.63,59.92},{91.88,138.01},{74.63,131.13},{43.69,84.98},
{25.90,46.74},{82.85,138.29},{34.55,52.01},{99.55,149.24},
{15.79,43.53},{95.86,135.97},{62.70,111.12},{38.35,80.78},
{ 5.29,39.78},{58.23,99.45},{59.68,113.93},{78.77,127.72},
{49.16,81.76},{56.55,97.19},{52.21,87.98},{55.96,114.67},
{97.15,146.64},{94.01,151.93},{43.31,79.87},{34.16,56.68},
{28.83,59.31},{19.83,47.01},{10.21,38.64},{79.72,109.91},
{10.46,32.97},{28.94,57.52},{68.62,116.51},{ 6.84,24.44},
{15.52,46.41},{31.43,66.17},{70.64,119.01},{70.24,116.63},
{12.92,27.90},{49.57,75.40},{ 9.32,23.11},{51.67,93.68},
{ 3.98,32.19},{90.57,146.42},{17.60,48.24},{47.50,90.41},
{37.79,73.89},{42.77,91.83},{34.35,59.30},{99.78,158.47},
{75.98,128.12},{62.95,113.09},{97.16,152.02},{13.11,35.42},
{ 4.08,28.40},{57.40,94.01},{79.93,127.87},{68.57,110.25},
{10.90,36.33},{19.81,49.12},{86.06,119.99},{46.20,72.35},
{32.82,63.84},{ 4.93,25.19},{15.85,44.91},{20.04,51.73},
{86.56,141.89},{87.55,150.74},{35.34,79.65},{27.83,56.11},
{60.92,81.19},{25.61,53.37},{26.40,53.64},{86.86,124.99},
{ 7.29,40.82},{35.77,66.45},{18.39,64.97},{38.48,73.61},
{21.30,62.41},{24.25,39.91},{11.60,48.43},{11.48,39.85},
{94.78,131.88},{55.63,85.73},{81.29,135.01},{64.18,99.29},
{60.66,99.84},{13.13,32.77},{26.02,46.59},{11.15,49.39},
{92.35,139.92},{24.36,49.16},{14.54,16.82},{42.96,74.67},
{86.32,138.83},{68.68,108.94},{44.38,74.30},{49.13,85.12},
{36.17,77.52},{17.40,24.57},{20.68,40.13},{79.41,137.20},
{36.50,77.99},{ 5.68,24.48},{42.76,76.17},{19.46,35.54},
{ 1.08,26.36},{61.10,111.34},{42.43,69.87},{73.41,112.01},
{63.22,93.20},{86.54,148.56},{34.81,84.76},{60.56,120.24},
{18.57,61.75},{64.52,106.19},{84.26,125.45},{29.06,46.85},
{10.50,63.08},{61.35,115.87},{97.00,132.02},{54.89,106.64},
{60.02,100.90},{52.48,87.21},{18.39,45.96},{ 8.48,23.90},
{26.39,60.66},{ 9.06,32.27},{66.50,121.18},{57.74,94.70},
{25.66,65.11},{ 4.72,34.54},{98.24,135.21},{74.73,108.60},
{ 6.56,22.62},{59.14,90.97},{63.28,97.68},{62.08,105.43},
{ 3.32,31.89},{40.85,61.30},{64.77,114.54},{79.90,124.93},
{48.62,93.82},{39.18,62.48},{68.52,107.87},{17.21,50.66},
{96.98,147.20},{83.15,149.78},{62.95,96.44},{39.43,81.83},
{35.05,69.53},{97.00,130.77},{65.74,116.67},{59.47,108.46},
{99.79,143.53},{74.41,127.93},{49.23,79.84},{48.09,79.68},
{24.35,39.55},{64.06,102.73},{30.43,62.26},{35.18,76.80},
{35.97,72.38},{67.10,123.28},{ 5.39,28.04},{50.42,71.75},
{66.24,107.42},{62.68,98.82},{99.83,145.11},{81.83,158.11},
{11.95,45.17},{87.93,136.10},{94.07,142.53},{17.22,31.52},
{52.86,84.57},{ 7.54,31.19},{ 1.89,24.77},{32.53,63.64},
{ 4.01,25.49},{76.42,131.15},{29.62,79.68},{ 2.00,23.82},
{65.14,78.12},{21.85,55.01},{31.48,65.68},{49.86,82.59},
{12.83,45.28},{16.84,35.94},{86.08,136.94},{96.06,141.05},
{53.87,93.27},{83.51,133.44},{ 1.66,31.46},{95.55,137.01},
{17.86,53.11},{57.50,105.66},{53.71,86.03},{46.89,88.06},
{23.18,50.81},{86.92,130.30},{45.23,79.77},{72.64,104.64},
{31.12,54.10},{18.39,52.72},{ 0.05,30.71},{42.07,72.84},
{84.06,130.70},{47.92,72.23},{40.04,64.11},{31.22,46.99},
{50.19,91.88},{76.17,112.30},{58.03,92.31},{61.65,82.10},
{83.77,141.17},{ 7.64,33.09},{61.59,111.34},{87.63,134.22},
{ 5.70,32.06},{17.96,44.92},{82.95,138.52},{55.38,101.67},
{59.98,108.53},{39.15,83.49},{43.79,66.94},{38.95,76.62},
{13.00,33.96},{92.12,146.59},{33.69,54.26},{31.77,70.51},
{76.01,119.01},{89.04,112.33},{81.66,120.53},{26.01,70.28},
{50.63,97.30},{90.34,145.37},{53.87,91.36},{52.47,94.66},
{72.14,102.84},{58.85,99.75},{91.94,149.34},{58.00,104.40},
{19.38,48.33},{68.36,108.16},{65.65,105.51},{13.09,48.61},
{89.62,158.28},{76.18,110.95},{62.50,103.82},{26.82,57.27},
{29.22,66.26},{82.88,124.08},{41.74,87.98},{43.19,76.66},
{17.91,27.98},{27.89,60.05},{62.38,107.68},{63.27,113.16},
{ 5.44,25.32},{67.67,114.17},{ 7.12,36.77},{74.81,136.62},
{76.88,112.53},{89.74,150.27},{ 6.39,20.44},{22.51,50.76},
{94.03,138.47},{76.74,108.17},{ 7.04,29.72},{14.36,38.43},
{24.51,60.54},{82.24,130.52},{69.94,112.04},{28.69,76.99},
{14.45,50.79},{73.87,115.32},{25.68,45.81},{50.07,102.88},
{73.78,116.08},{11.07,20.97},{65.56,100.87},{14.48,45.08},
{83.19,122.32},{47.87,80.93},{24.48,39.61},{23.33,55.38},
{32.35,50.84},{36.16,82.82},{28.34,55.84},{11.59,28.66},
{ 0.40,26.06},{66.88,111.04},{69.58,113.59},{93.99,145.56},
{78.13,109.50},{29.81,57.28},{23.77,59.59},{70.49,98.18},
{44.95,98.42},{20.51,28.11},{25.98,60.82},{82.15,129.63},
{28.77,55.51},{66.78,102.42},{75.42,129.50},{23.39,64.67},
{76.55,110.56},{14.19,36.25},{33.00,51.26},{80.36,116.31},
{46.74,72.04},{25.14,51.22},{42.25,83.65},{84.13,153.10},
{80.50,123.69},{88.97,137.08},{95.68,149.98},{90.76,141.42},
{68.80,116.88},{23.59,34.43},{42.86,75.48},{65.71,105.58},
{78.32,123.27},{66.13,104.58},{ 1.34,42.60},{30.11,69.76},
{90.65,127.59},{34.78,69.18},{29.77,77.93},{38.36,66.87},
{92.92,147.24},{35.69,93.35},{62.34,95.76},{74.98,124.79},
{87.69,149.64},{20.97,42.04},{34.13,66.90},{36.64,71.45},
{51.59,89.00},{33.67,44.32},{16.69,39.25},{21.13,57.54},
{62.53,117.08},{37.14,70.64},{32.77,48.50},{75.61,118.15},
{33.30,70.29},{79.86,125.99},{75.59,128.36},{98.53,154.32},
{81.91,157.72},{22.40,49.18},{50.96,75.34},{60.90,114.22},
{10.56,54.60},{52.07,95.27},{43.17,82.10},{50.66,78.40},
{92.35,159.07},{95.95,159.33},{54.10,108.13},{38.65,81.78},
{60.38,95.30},{94.44,149.00},{37.09,56.81},{ 0.96,46.94},
{41.82,86.77},{38.02,60.62},{32.91,57.15},{43.08,78.34},
{16.36,45.02},{66.52,103.37},{13.53,54.06},{61.65,104.07},
{22.59,39.62},{17.16,60.07},{89.26,134.35},{36.66,64.65},
{27.42,54.52},{78.54,119.20},{74.01,117.82},{15.74,42.28},
{18.50,49.44},{26.40,53.02},{20.80,22.16},{79.34,141.43},
{91.51,144.71},{94.60,154.66},{16.13,47.29},{ 0.48,27.16},
{89.43,142.53},{ 6.85,36.89},{ 0.45,22.73},{ 5.81,30.04},
{36.10,53.96},{90.42,133.09},{92.77,147.72},{ 6.41,36.37},
{76.45,129.76},{75.49,116.43},{50.33,91.69},{67.94,109.11},
{ 2.25,17.10},{17.69,51.56},{52.66,81.55},{36.65,65.41},
{95.03,122.46},{43.45,65.97},{87.86,125.81},{16.87,28.76},
{12.43,27.92},{25.52,48.11},{96.43,152.85},{43.94,79.85}
};
double residual_error(double x, double g, double m, double c) {
double e = (m * x) + c - g;
return e * e;
}
__device__ double d_residual_error(double x, double g, double m, double c) {
double e = (m * x) + c - g;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].g, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].g, *m, *c);
}
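// The host code launches this with 100 blocks of 10 threads, i.e. exactly
// n_data = 1000 threads, one per data point, so no bounds check is needed here.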
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
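// om/oc are the 8 unit steps (the compass directions) tried around the current
// (bm, bc) estimate; each iteration moves to the best neighbour and the search
// stops once no neighbour improves the RMS error.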
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be = rms_error(bm, bc);
error = cudaMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
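/* Hill-climbing search: every iteration evaluates the RMS error of the 8
   neighbouring (m, c) candidates on the GPU, moves to the best of them if it
   improves on the current error, and stops when no neighbour is better. */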
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i] = bc + (oc[i] * step);
}
error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
cudaGetErrorString(error));
}
for(i=0;i<8;i++) {
double h_error_sum_arr[1000];
double error_sum_total = 0; /* must start at zero: the per-point residuals are accumulated into it below */
double error_sum_mean;
d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
cudaDeviceSynchronize(); /* cudaThreadSynchronize() is deprecated */
error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
if(error){
fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
cudaGetErrorString(error));
}
for(int j=0; j<n_data; j++) {
error_sum_total += h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / n_data;
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error) {
best_error = e[i];
best_error_i = i;
}
error_sum_total = 0;
}
if(best_error < be) {
be = best_error;
bm = dm[best_error_i];
bc = dc[best_error_i];
} else {
minimum_found = 1;
}
}
error = cudaFree(d_dm);
if(error){
fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_dc);
if(error){
fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_data);
if(error){
fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_error_sum_arr);
if(error){
fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
6bf034b8fdd5faa3b2a80671ab4779a5745f37de.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
const char inputName256[] = "data/input_14_1_256.bin";
const char weight_winograd_Name256[] = "data/weight_winograd_256_256.bin";
const char bnBias_winograd_Name256[] = "data/bnBias_winograd_256.bin";
const char bnScale_winograd_Name256[] = "data/bnScale_winograd_256.bin";
#define d(input, i, j, Inz) ( input[Inz + i*768 + (j<<7)] )
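// d(input, i, j, Inz) addresses element (i, j) of a 6x6 tile held in shared memory
// for channel Inz: rows are strided by 768 (= 6*128) and columns by 128, since 128
// of the 256 channels are kept resident per block (selected by Part).
// kernel_256_winograd_BtdB applies the Winograd F(4x4,3x3) input transform
// B^T * d * B to every 6x6 input tile, one row of B^T per threadIdx.y value.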
__global__ void kernel_256_winograd_BtdB(
const float *__restrict__ pInputs,
float *__restrict__ pOutputs)
{
int Inx = blockIdx.x<<2, Iny0 = blockIdx.y<<2, Part = blockIdx.z,
Iny1 = threadIdx.y, Inz = threadIdx.x;
int Iny = Iny0+Iny1, stride_r = 4096, stride_c = 256; // 4096 = 16*256
int c_glb_start = Inx*stride_r + Iny*stride_c + Inz + (Part<<7), c_input = Iny1*128 + Inz;
extern __shared__ float input[];
int stride_768[6] = {0, 768, 1536, 2304, 3072, 3840}; // 768 = 6*128
for (int i = 0; i < 6; i++) {
input[c_input + stride_768[i]] = pInputs[c_glb_start + i*stride_r];
}
__syncthreads();
float BTd[6];
switch(Iny1) {
case 0:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 0, j, Inz)*4 - d(input, 2, j, Inz)*5 + d(input, 4, j, Inz);
}
break;
case 1:
for (int j = 0; j < 6; j++) {
BTd[j] = -d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 + d(input, 3, j, Inz) + d(input, 4, j, Inz);
}
break;
case 2:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 - d(input, 3, j, Inz) + d(input, 4, j, Inz);
}
break;
case 3:
for (int j = 0; j < 6; j++) {
BTd[j] = -d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) + d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
}
break;
case 4:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) - d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
}
break;
case 5:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 1, j, Inz)*4 - d(input, 3, j, Inz)*5 + d(input, 5, j, Inz);
}
break;
}
__syncthreads();
int tmp_offset = Iny1*768+Inz;
for (int i = 0; i < 6; i++) {
input[tmp_offset + i*128] = BTd[i];
}
__syncthreads();
float BTdB[6];
switch(Iny1) {
case 0:
for (int i = 0; i < 6; i++) {
BTdB[i] = 4*d(input, i, 0, Inz) - 5*d(input, i, 2, Inz) + d(input, i, 4, Inz);
}
break;
case 1:
for (int i = 0; i < 6; i++) {
BTdB[i] = -4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) + d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 2:
for (int i = 0; i < 6; i++) {
BTdB[i] = 4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) - d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 3:
for (int i = 0; i < 6; i++) {
BTdB[i] = -2*d(input, i, 1, Inz) - d(input, i, 2, Inz) + 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 4:
for (int i = 0; i < 6; i++) {
BTdB[i] = 2*d(input, i, 1, Inz) - d(input, i, 2, Inz) - 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 5:
for (int i = 0; i < 6; i++) {
BTdB[i] = 4*d(input, i, 1, Inz) - 5*d(input, i, 3, Inz) + d(input, i, 5, Inz);
}
break;
}
__syncthreads();
for (int i = 0; i < 6; i++) {
pOutputs[(Iny1 + i*6)*4096 + (blockIdx.x*4+blockIdx.y)*256 + Inz + (Part<<7)] = BTdB[i];
}
}
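// Output transform: multiplies each 6x6 tile of the element-wise product by the
// Winograd output matrices (A^T * M * A), applies the per-channel batch-norm
// scale/bias and a ReLU, and writes the resulting 4x4 tile; the boundary checks on
// Tilex/Tiley/Inx/Iny clip edge tiles to the valid region of the padded 16x16 output.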
__global__ void kernel_256_winograd_AtIA(
const float *__restrict__ pInputs,
const float *__restrict__ pBiases,
const float *__restrict__ pScales,
float *__restrict__ pOutputs)
{
int Tilex = blockIdx.x, Inx = threadIdx.x;
int Tiley = blockIdx.y, Iny = threadIdx.y;
int kz = blockIdx.z;
int c_input = Inx*6 + Iny;
__shared__ float bias, scale;
extern __shared__ float input[];
input[c_input] = pInputs[c_input*16*256 + (Tilex*4+Tiley)*256 + kz];
bias = pBiases[kz];
scale = pScales[kz];
__syncthreads();
float tmp = 0;
switch(Inx) {
case 0:
tmp = input[Iny] + input[6+Iny] + input[12+Iny] + input[18+Iny] + input[24+Iny];
break;
case 1:
tmp = input[6+Iny] - input[12+Iny] + 2*input[18+Iny] - 2*input[24+Iny];
break;
case 2:
tmp = input[6+Iny] + input[12+Iny] + 4*input[18+Iny] + 4*input[24+Iny];
break;
case 3:
tmp = input[6+Iny] - input[12+Iny] + 8*input[18+Iny] - 8*input[24+Iny] + input[30+Iny];
break;
}
__syncthreads();
input[c_input] = tmp;
__syncthreads();
if (Inx > 3 || (Tilex == 3 && Inx > 1)) return;
int x;
float o;
switch(Iny) {
case 0:
x = Inx*6;
o = scale*(input[x]+input[x+1]+input[x+2]+input[x+3]+input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+1)*256 + kz] = o > 0 ? o : 0;
break;
case 1:
x = Inx*6;
o = scale*(input[x+1] - input[x+2] + 2*input[x+3] - 2*input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+2)*256 + kz] = o > 0 ? o : 0;
break;
case 2:
if (Tiley == 3) break;
x = Inx*6;
o = scale*(input[x+1] + input[x+2] + 4*input[x+3] + 4*input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+3)*256 + kz] = o > 0 ? o : 0;
break;
case 3:
if (Tiley == 3) break;
x = Inx*6;
o = scale*(input[x+1] - input[x+2] + 8*input[x+3] - 8*input[x+4] + input[x+5]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+4)*256 + kz] = o > 0 ? o : 0;
break;
}
}
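// Element-wise stage of the Winograd convolution, expressed as 36 independent
// matrix products: for each tile position, a 16x256 block of transformed inputs is
// multiplied by the 256x256 transformed weights, with the 256-deep inner dimension
// staged through shared memory in slices of 32.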
__global__ void kernel_256_OuterProduct_256(
const float *__restrict__ A,
const float *__restrict__ B,
float *__restrict__ C)
{
int Tile = blockIdx.x, Part = blockIdx.y,
tX = threadIdx.x, tY = threadIdx.y;
int c_input = tY*256 + tX,
c_kernel = c_input,
T_offset = (Tile<<12) + (Part<<11) + c_input, B_offset = (Tile<<16) + c_kernel;
extern __shared__ float input[];
float *kernel = input + 2048, *out = kernel + 8192;
int B_stride[32] = {0, 256, 512, 768, 1024, 1280, 1536, 1792,
2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840,
4096, 4352, 4608, 4864, 5120, 5376, 5632, 5888,
6144, 6400, 6656, 6912, 7168, 7424, 7680, 7936};
out[c_input] = 0.0f;
out[c_input+1024] = 0;
input[c_input] = A[T_offset];
input[c_input+1024] = A[T_offset+1024];
for (int k = 0; k < 8; k++) {
int B_start = B_offset + (k<<13); // 8192 = 32*256
kernel[c_kernel] = B[B_start], kernel[c_kernel+1024] = B[B_start+1024];
kernel[c_kernel+2048] = B[B_start+2048], kernel[c_kernel+3072] = B[B_start+3072];
kernel[c_kernel+4096] = B[B_start+4096], kernel[c_kernel+5120] = B[B_start+5120];
kernel[c_kernel+6144] = B[B_start+6144], kernel[c_kernel+7168] = B[B_start+7168];
__syncthreads();
float sum = 0, sum1 = 0;
int y_tmp = (tY<<8)+(k<<5), y_tmp1 = y_tmp+1024;
for (int j = 0; j < 32; j++) {
sum += input[y_tmp + j] * kernel[tX + B_stride[j]];
sum1 += input[y_tmp1 + j] * kernel[tX + B_stride[j]];
}
out[c_input] += sum;
out[c_input+1024] += sum1;
__syncthreads();
}
C[T_offset] = out[c_input];
C[T_offset+1024] = out[c_input+1024];
}
void kernel_256(double &time, double &ktime) {
float *input_ = get_parameter(inputName256, 16*16*256);
float *input, *output, *l_weights;
float *kernel = get_parameter(weight_winograd_Name256, 36*256*256), *t_input, *ip;
int nInput = 16*16*256, nOutput = 16*16*256, nWeights = 36*256*256, nBias = 256,
nTransInput = 16*6*6*256, nInnerProd = 16*6*6*256;
float *l_bnBias, *l_bnScale, *bnBias, *bnScale;
float result[nOutput];
bnBias = get_parameter(bnBias_winograd_Name256, 256);
bnScale = get_parameter(bnScale_winograd_Name256, 256);
auto start = std::chrono::steady_clock::now();
hipMalloc((void **) &input, nInput<<2);
hipMalloc((void **) &output, nOutput<<2);
hipMalloc((void **) &l_weights, nWeights<<2);
hipMalloc((void **) &t_input, nTransInput<<2);
hipMalloc((void **) &ip, nInnerProd<<2);
hipMalloc((void **) &l_bnBias, nBias<<2);
hipMalloc((void **) &l_bnScale, nBias<<2);
hipMemset((void *) output, 0, nOutput<<2);
hipMemset((void *) t_input, 0, nTransInput<<2);
hipMemset((void *) ip, 0, nInnerProd<<2);
hipMemcpy(input, input_, nInput<<2, hipMemcpyHostToDevice);
hipMemcpy(l_weights, kernel, nWeights<<2, hipMemcpyHostToDevice);
hipMemcpy(l_bnBias, bnBias, nBias<<2, hipMemcpyHostToDevice);
hipMemcpy(l_bnScale, bnScale, nBias<<2, hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto kstart = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( kernel_256_winograd_BtdB) , dim3(dim3(4, 4, 2)), dim3(dim3(128, 6)), (6*6*128)<<2 , 0, 0, input, t_input);
hipLaunchKernelGGL(( kernel_256_OuterProduct_256), dim3(dim3(36, 2)), dim3(dim3(256, 4)), (8*256 + 32*256 + 8*256)<<2 , 0, 0, t_input, l_weights, ip);
hipLaunchKernelGGL(( kernel_256_winograd_AtIA) , dim3(dim3(4, 4, 256)), dim3(dim3(6, 6)), ((6*6)<<2), 0, 0, ip, l_bnBias, l_bnScale, output);
hipDeviceSynchronize();
auto kend = std::chrono::steady_clock::now();
ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count();
hipMemcpy(result, output, nOutput<<2, hipMemcpyDeviceToHost);
hipFree(t_input);
hipFree(ip);
hipFree(input);
hipFree(output);
hipFree(l_weights);
hipFree(l_bnScale);
hipFree(l_bnBias);
auto end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
#ifdef DEBUG
double s = 0;
for (int i = 0; i < nOutput; i++) {
s += result[i];
}
printf("Check sum: %lf\n", s);
#endif
free(kernel);
free(bnScale);
free(bnBias);
free(input_);
}
|
6bf034b8fdd5faa3b2a80671ab4779a5745f37de.cu
|
#include "util.h"
const char inputName256[] = "data/input_14_1_256.bin";
const char weight_winograd_Name256[] = "data/weight_winograd_256_256.bin";
const char bnBias_winograd_Name256[] = "data/bnBias_winograd_256.bin";
const char bnScale_winograd_Name256[] = "data/bnScale_winograd_256.bin";
#define d(input, i, j, Inz) ( input[Inz + i*768 + (j<<7)] )
__global__ void kernel_256_winograd_BtdB(
const float *__restrict__ pInputs,
float *__restrict__ pOutputs)
{
int Inx = blockIdx.x<<2, Iny0 = blockIdx.y<<2, Part = blockIdx.z,
Iny1 = threadIdx.y, Inz = threadIdx.x;
int Iny = Iny0+Iny1, stride_r = 4096, stride_c = 256; // 4096 = 16*256
int c_glb_start = Inx*stride_r + Iny*stride_c + Inz + (Part<<7), c_input = Iny1*128 + Inz;
extern __shared__ float input[];
int stride_768[6] = {0, 768, 1536, 2304, 3072, 3840}; // 768 = 6*128
for (int i = 0; i < 6; i++) {
input[c_input + stride_768[i]] = pInputs[c_glb_start + i*stride_r];
}
__syncthreads();
float BTd[6];
switch(Iny1) {
case 0:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 0, j, Inz)*4 - d(input, 2, j, Inz)*5 + d(input, 4, j, Inz);
}
break;
case 1:
for (int j = 0; j < 6; j++) {
BTd[j] = -d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 + d(input, 3, j, Inz) + d(input, 4, j, Inz);
}
break;
case 2:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 - d(input, 3, j, Inz) + d(input, 4, j, Inz);
}
break;
case 3:
for (int j = 0; j < 6; j++) {
BTd[j] = -d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) + d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
}
break;
case 4:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) - d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
}
break;
case 5:
for (int j = 0; j < 6; j++) {
BTd[j] = d(input, 1, j, Inz)*4 - d(input, 3, j, Inz)*5 + d(input, 5, j, Inz);
}
break;
}
__syncthreads();
int tmp_offset = Iny1*768+Inz;
for (int i = 0; i < 6; i++) {
input[tmp_offset + i*128] = BTd[i];
}
__syncthreads();
float BTdB[6];
switch(Iny1) {
case 0:
for (int i = 0; i < 6; i++) {
BTdB[i] = 4*d(input, i, 0, Inz) - 5*d(input, i, 2, Inz) + d(input, i, 4, Inz);
}
break;
case 1:
for (int i = 0; i < 6; i++) {
BTdB[i] = -4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) + d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 2:
for (int i = 0; i < 6; i++) {
BTdB[i] = 4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) - d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 3:
for (int i = 0; i < 6; i++) {
BTdB[i] = -2*d(input, i, 1, Inz) - d(input, i, 2, Inz) + 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 4:
for (int i = 0; i < 6; i++) {
BTdB[i] = 2*d(input, i, 1, Inz) - d(input, i, 2, Inz) - 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
}
break;
case 5:
for (int i = 0; i < 6; i++) {
BTdB[i] = 4*d(input, i, 1, Inz) - 5*d(input, i, 3, Inz) + d(input, i, 5, Inz);
}
break;
}
__syncthreads();
for (int i = 0; i < 6; i++) {
pOutputs[(Iny1 + i*6)*4096 + (blockIdx.x*4+blockIdx.y)*256 + Inz + (Part<<7)] = BTdB[i];
}
}
__global__ void kernel_256_winograd_AtIA(
const float *__restrict__ pInputs,
const float *__restrict__ pBiases,
const float *__restrict__ pScales,
float *__restrict__ pOutputs)
{
int Tilex = blockIdx.x, Inx = threadIdx.x;
int Tiley = blockIdx.y, Iny = threadIdx.y;
int kz = blockIdx.z;
int c_input = Inx*6 + Iny;
__shared__ float bias, scale;
extern __shared__ float input[];
input[c_input] = pInputs[c_input*16*256 + (Tilex*4+Tiley)*256 + kz];
bias = pBiases[kz];
scale = pScales[kz];
__syncthreads();
float tmp = 0;
switch(Inx) {
case 0:
tmp = input[Iny] + input[6+Iny] + input[12+Iny] + input[18+Iny] + input[24+Iny];
break;
case 1:
tmp = input[6+Iny] - input[12+Iny] + 2*input[18+Iny] - 2*input[24+Iny];
break;
case 2:
tmp = input[6+Iny] + input[12+Iny] + 4*input[18+Iny] + 4*input[24+Iny];
break;
case 3:
tmp = input[6+Iny] - input[12+Iny] + 8*input[18+Iny] - 8*input[24+Iny] + input[30+Iny];
break;
}
__syncthreads();
input[c_input] = tmp;
__syncthreads();
if (Inx > 3 || (Tilex == 3 && Inx > 1)) return;
int x;
float o;
switch(Iny) {
case 0:
x = Inx*6;
o = scale*(input[x]+input[x+1]+input[x+2]+input[x+3]+input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+1)*256 + kz] = o > 0 ? o : 0;
break;
case 1:
x = Inx*6;
o = scale*(input[x+1] - input[x+2] + 2*input[x+3] - 2*input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+2)*256 + kz] = o > 0 ? o : 0;
break;
case 2:
if (Tiley == 3) break;
x = Inx*6;
o = scale*(input[x+1] + input[x+2] + 4*input[x+3] + 4*input[x+4]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+3)*256 + kz] = o > 0 ? o : 0;
break;
case 3:
if (Tiley == 3) break;
x = Inx*6;
o = scale*(input[x+1] - input[x+2] + 8*input[x+3] - 8*input[x+4] + input[x+5]) + bias;
pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+4)*256 + kz] = o > 0 ? o : 0;
break;
}
}
__global__ void kernel_256_OuterProduct_256(
const float *__restrict__ A,
const float *__restrict__ B,
float *__restrict__ C)
{
int Tile = blockIdx.x, Part = blockIdx.y,
tX = threadIdx.x, tY = threadIdx.y;
int c_input = tY*256 + tX,
c_kernel = c_input,
T_offset = (Tile<<12) + (Part<<11) + c_input, B_offset = (Tile<<16) + c_kernel;
extern __shared__ float input[];
float *kernel = input + 2048, *out = kernel + 8192;
int B_stride[32] = {0, 256, 512, 768, 1024, 1280, 1536, 1792,
2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840,
4096, 4352, 4608, 4864, 5120, 5376, 5632, 5888,
6144, 6400, 6656, 6912, 7168, 7424, 7680, 7936};
out[c_input] = 0.0f;
out[c_input+1024] = 0;
input[c_input] = A[T_offset];
input[c_input+1024] = A[T_offset+1024];
for (int k = 0; k < 8; k++) {
int B_start = B_offset + (k<<13); // 8192 = 32*256
kernel[c_kernel] = B[B_start], kernel[c_kernel+1024] = B[B_start+1024];
kernel[c_kernel+2048] = B[B_start+2048], kernel[c_kernel+3072] = B[B_start+3072];
kernel[c_kernel+4096] = B[B_start+4096], kernel[c_kernel+5120] = B[B_start+5120];
kernel[c_kernel+6144] = B[B_start+6144], kernel[c_kernel+7168] = B[B_start+7168];
__syncthreads();
float sum = 0, sum1 = 0;
int y_tmp = (tY<<8)+(k<<5), y_tmp1 = y_tmp+1024;
for (int j = 0; j < 32; j++) {
sum += input[y_tmp + j] * kernel[tX + B_stride[j]];
sum1 += input[y_tmp1 + j] * kernel[tX + B_stride[j]];
}
out[c_input] += sum;
out[c_input+1024] += sum1;
__syncthreads();
}
C[T_offset] = out[c_input];
C[T_offset+1024] = out[c_input+1024];
}
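// Host driver for this 256-channel Winograd convolution layer:
// (1) kernel_256_winograd_BtdB transforms the padded 16x16x256 input into
//     36 x (16x256) tile matrices,
// (2) kernel_256_OuterProduct_256 multiplies each of the 36 tile matrices by the
//     pre-transformed 256x256 weights,
// (3) kernel_256_winograd_AtIA applies the inverse transform plus batch-norm and
//     ReLU to produce the output.
// 'time' covers the whole host path including transfers, 'ktime' the kernels only.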
void kernel_256(double &time, double &ktime) {
float *input_ = get_parameter(inputName256, 16*16*256);
float *input, *output, *l_weights;
float *kernel = get_parameter(weight_winograd_Name256, 36*256*256), *t_input, *ip;
int nInput = 16*16*256, nOutput = 16*16*256, nWeights = 36*256*256, nBias = 256,
nTransInput = 16*6*6*256, nInnerProd = 16*6*6*256;
float *l_bnBias, *l_bnScale, *bnBias, *bnScale;
float result[nOutput];
bnBias = get_parameter(bnBias_winograd_Name256, 256);
bnScale = get_parameter(bnScale_winograd_Name256, 256);
auto start = std::chrono::steady_clock::now();
cudaMalloc((void **) &input, nInput<<2);
cudaMalloc((void **) &output, nOutput<<2);
cudaMalloc((void **) &l_weights, nWeights<<2);
cudaMalloc((void **) &t_input, nTransInput<<2);
cudaMalloc((void **) &ip, nInnerProd<<2);
cudaMalloc((void **) &l_bnBias, nBias<<2);
cudaMalloc((void **) &l_bnScale, nBias<<2);
cudaMemset((void *) output, 0, nOutput<<2);
cudaMemset((void *) t_input, 0, nTransInput<<2);
cudaMemset((void *) ip, 0, nInnerProd<<2);
cudaMemcpy(input, input_, nInput<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_weights, kernel, nWeights<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_bnBias, bnBias, nBias<<2, cudaMemcpyHostToDevice);
cudaMemcpy(l_bnScale, bnScale, nBias<<2, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto kstart = std::chrono::steady_clock::now();
kernel_256_winograd_BtdB <<<dim3(4, 4, 2), dim3(128, 6), (6*6*128)<<2 >>> (input, t_input);
kernel_256_OuterProduct_256<<<dim3(36, 2), dim3(256, 4), (8*256 + 32*256 + 8*256)<<2 >>> (t_input, l_weights, ip);
kernel_256_winograd_AtIA <<<dim3(4, 4, 256), dim3(6, 6), ((6*6)<<2)>>> (ip, l_bnBias, l_bnScale, output);
cudaDeviceSynchronize();
auto kend = std::chrono::steady_clock::now();
ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count();
cudaMemcpy(result, output, nOutput<<2, cudaMemcpyDeviceToHost);
cudaFree(t_input);
cudaFree(ip);
cudaFree(input);
cudaFree(output);
cudaFree(l_weights);
cudaFree(l_bnScale);
cudaFree(l_bnBias);
auto end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
#ifdef DEBUG
double s = 0;
for (int i = 0; i < nOutput; i++) {
s += result[i];
}
printf("Check sum: %lf\n", s);
#endif
free(kernel);
free(bnScale);
free(bnBias);
free(input_);
}
|
841106a121ca9bc16cd603b8c1bd844ee74e6cff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Exercise 5, Lab 4: CUDA
 * Mariana Hernández
 * Alan Córdova
*/
#include <stdio.h>
#define STRIDE 32
#define OFFSET 0
#define GROUP_SIZE 512
#define SHARED_SIZE 256
// matrix dimension
#define n 8
//#define m 8
/* Utility to check for and report CUDA errors */
void checkCUDAError(const char*);
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N, int sec_size) {
__shared__ float AS[SHARED_SIZE];
__shared__ float BS[SHARED_SIZE];
int ROW = blockIdx.y*sec_size+threadIdx.y;
int COL = blockIdx.x*sec_size+threadIdx.x;
float tmpSum = 0;
for(int j = 0; j < N/sec_size; j++){
AS[threadIdx.y * sec_size + threadIdx.x] = A[ROW * N + (j * sec_size + threadIdx.x)];
BS[threadIdx.y * sec_size + threadIdx.x] = B[COL + (j * sec_size * N) + (threadIdx.y*N)];
__syncthreads();
for (int i = 0; i < sec_size; i++) {
tmpSum += AS[threadIdx.y * sec_size + i] * BS[i * sec_size + threadIdx.x];
}
__syncthreads();
}
C[ROW * N + COL] = tmpSum;
}
// main routine that executes on the host
int main(void)
{
float *a_h, *a_d; // Pointer to host & device arrays
float *mat1_h, *mat2_h, *mat_res_h, *mat1_d, *mat2_d, *mat_res_d;
const int N = 1<<10; // Make a big array with 2**N elements
size_t size = N * sizeof(float);
const int n_mat = n * n;
size_t sz = n_mat * sizeof(float);
/* Helpers for measuring elapsed time */
hipEvent_t start, stop;
float time;
a_h = (float *)malloc(size); // Allocate array on host
hipMalloc((void **) &a_d, size); // Allocate array on device
mat1_h = (float *)malloc(sz); // Allocate array on host
mat2_h = (float *)malloc(sz); // Allocate array on host
mat_res_h = (float *)malloc(sz); // Allocate array on host
hipMalloc((void **) &mat1_d, sz); // Allocate array on device
hipMalloc((void **) &mat2_d, sz); // Allocate array on device
hipMalloc((void **) &mat_res_d, sz); // Allocate array on device
// Initialize host array and copy it to CUDA device
for (int i=0; i<N; i++){
a_h[i] = (float)i;
}
for (int i = 0; i < n_mat; ++i){
mat1_h[i] = i % 8;
mat2_h[i] = i % 8;
mat_res_h[i] = 0;
}
printf("mats:\n");
for (int i = 0; i < n_mat; ++i){
if(i%n == 0)
printf("\n");
printf("%.2f ", mat1_h[i] );
}
hipMemcpy(mat1_d, mat1_h, sz, hipMemcpyHostToDevice);
hipMemcpy(mat2_d, mat2_h, sz, hipMemcpyHostToDevice);
hipMemcpy(mat_res_d, mat_res_h, sz, hipMemcpyHostToDevice);
checkCUDAError("memcpy");
// Create timer for timing CUDA calculation
//PPunsigned int timer = 0;
//PPcutCreateTimer( &timer );
hipEventCreate(&start);
hipEventCreate(&stop);
// Set number of threads and blocks
//int n_threads_per_block = 128;//1<<9; // 512 threads per block
//int n_blocks = 256;//1<<10; // 1024 blocks
// Do calculation on device
hipEventRecord(start,0);
//matrix_mult <<< n_blocks, n_threads_per_block >>> (mat1_d, mat2_d, mat_res_d, n);
dim3 threadsPerBlock(n, n);
dim3 blocksPerGrid(1, 1);
if (n*n > 512){
// A 512x512 block exceeds the 1024-threads-per-block limit and would overflow the
// SHARED_SIZE tiles, so cap the tile at 16x16 (16*16 == SHARED_SIZE).
threadsPerBlock.x = 16;
threadsPerBlock.y = 16;
blocksPerGrid.x = ceil(double(n)/double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(n)/double(threadsPerBlock.y));
}
hipLaunchKernelGGL(( matrixMultiplicationKernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, mat1_d, mat2_d, mat_res_d, n, threadsPerBlock.x);
hipDeviceSynchronize(); // Wait for matrix_mult to finish on CUDA
checkCUDAError("kernel invocation");
// Retrieve result from device and store it in host array
hipMemcpy(mat1_h, mat1_d, sz, hipMemcpyDeviceToHost);
hipMemcpy(mat2_h, mat2_d, sz, hipMemcpyDeviceToHost);
hipMemcpy(mat_res_h, mat_res_d, sz, hipMemcpyDeviceToHost);
checkCUDAError("memcpy");
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime( &time, start, stop );
// Print some of the results
//for (int i=0; i<N; i+=N/50) printf("%d %f\n", i, a_h[i]);
// Print execution time
printf("\n\nEXECUTION TIME: %f ms\n\n", time);
printf("res:\n");
for (int i = 0; i < n_mat; ++i)
{
if(i%n == 0)
printf("\n");
printf("%.2f ", mat_res_h[i] );
}
hipEventDestroy( start );
hipEventDestroy( stop );
free(mat1_h);
free(mat2_h);
free(mat_res_h);
hipFree(mat1_d);
hipFree(mat2_d);
hipFree(mat_res_d);
}
/* Utility function to check for and report CUDA errors */
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
841106a121ca9bc16cd603b8c1bd844ee74e6cff.cu
|
/*
 * Exercise 5, Lab 4: CUDA
* Mariana Hernández
* Alan Córdova
*/
#include <stdio.h>
#define STRIDE 32
#define OFFSET 0
#define GROUP_SIZE 512
#define SHARED_SIZE 256
// matrix dimension
#define n 8
//#define m 8
/* Utility to check for and report CUDA errors */
void checkCUDAError(const char*);
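/* Shared-memory tiled matrix multiply: each block loads sec_size x sec_size tiles
   of A and B into AS/BS, synchronises, and accumulates the partial dot products,
   so each global element is loaded once per tile rather than once per output
   element. The kernel assumes N is a multiple of sec_size and that
   sec_size*sec_size does not exceed SHARED_SIZE. */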
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N, int sec_size) {
__shared__ float AS[SHARED_SIZE];
__shared__ float BS[SHARED_SIZE];
int ROW = blockIdx.y*sec_size+threadIdx.y;
int COL = blockIdx.x*sec_size+threadIdx.x;
float tmpSum = 0;
for(int j = 0; j < N/sec_size; j++){
AS[threadIdx.y * sec_size + threadIdx.x] = A[ROW * N + (j * sec_size + threadIdx.x)];
BS[threadIdx.y * sec_size + threadIdx.x] = B[COL + (j * sec_size * N) + (threadIdx.y*N)];
__syncthreads();
for (int i = 0; i < sec_size; i++) {
tmpSum += AS[threadIdx.y * sec_size + i] * BS[i * sec_size + threadIdx.x];
}
__syncthreads();
}
C[ROW * N + COL] = tmpSum;
}
// main routine that executes on the host
int main(void)
{
float *a_h, *a_d; // Pointer to host & device arrays
float *mat1_h, *mat2_h, *mat_res_h, *mat1_d, *mat2_d, *mat_res_d;
const int N = 1<<10; // Make a big array with 2**N elements
size_t size = N * sizeof(float);
const int n_mat = n * n;
size_t sz = n_mat * sizeof(float);
/* Helpers for measuring elapsed time */
cudaEvent_t start, stop;
float time;
a_h = (float *)malloc(size); // Allocate array on host
cudaMalloc((void **) &a_d, size); // Allocate array on device
mat1_h = (float *)malloc(sz); // Allocate array on host
mat2_h = (float *)malloc(sz); // Allocate array on host
mat_res_h = (float *)malloc(sz); // Allocate array on host
cudaMalloc((void **) &mat1_d, sz); // Allocate array on device
cudaMalloc((void **) &mat2_d, sz); // Allocate array on device
cudaMalloc((void **) &mat_res_d, sz); // Allocate array on device
// Initialize host array and copy it to CUDA device
for (int i=0; i<N; i++){
a_h[i] = (float)i;
}
for (int i = 0; i < n_mat; ++i){
mat1_h[i] = i % 8;
mat2_h[i] = i % 8;
mat_res_h[i] = 0;
}
printf("mats:\n");
for (int i = 0; i < n_mat; ++i){
if(i%n == 0)
printf("\n");
printf("%.2f ", mat1_h[i] );
}
cudaMemcpy(mat1_d, mat1_h, sz, cudaMemcpyHostToDevice);
cudaMemcpy(mat2_d, mat2_h, sz, cudaMemcpyHostToDevice);
cudaMemcpy(mat_res_d, mat_res_h, sz, cudaMemcpyHostToDevice);
checkCUDAError("memcpy");
// Create timer for timing CUDA calculation
//PPunsigned int timer = 0;
//PPcutCreateTimer( &timer );
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Set number of threads and blocks
//int n_threads_per_block = 128;//1<<9; // 512 threads per block
//int n_blocks = 256;//1<<10; // 1024 blocks
// Do calculation on device
cudaEventRecord(start,0);
//matrix_mult <<< n_blocks, n_threads_per_block >>> (mat1_d, mat2_d, mat_res_d, n);
dim3 threadsPerBlock(n, n);
dim3 blocksPerGrid(1, 1);
if (n*n > 512){
// A 512x512 block exceeds the 1024-threads-per-block limit and would overflow the
// SHARED_SIZE tiles, so cap the tile at 16x16 (16*16 == SHARED_SIZE).
threadsPerBlock.x = 16;
threadsPerBlock.y = 16;
blocksPerGrid.x = ceil(double(n)/double(threadsPerBlock.x));
blocksPerGrid.y = ceil(double(n)/double(threadsPerBlock.y));
}
matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(mat1_d, mat2_d, mat_res_d, n, threadsPerBlock.x);
cudaDeviceSynchronize(); // Wait for matrix_mult to finish on CUDA
checkCUDAError("kernel invocation");
// Retrieve result from device and store it in host array
cudaMemcpy(mat1_h, mat1_d, sz, cudaMemcpyDeviceToHost);
cudaMemcpy(mat2_h, mat2_d, sz, cudaMemcpyDeviceToHost);
cudaMemcpy(mat_res_h, mat_res_d, sz, cudaMemcpyDeviceToHost);
checkCUDAError("memcpy");
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &time, start, stop );
// Print some of the results
//for (int i=0; i<N; i+=N/50) printf("%d %f\n", i, a_h[i]);
// Print execution time
printf("\n\nEXECUTION TIME: %f ms\n\n", time);
printf("res:\n");
for (int i = 0; i < n_mat; ++i)
{
if(i%n == 0)
printf("\n");
printf("%.2f ", mat_res_h[i] );
}
cudaEventDestroy( start );
cudaEventDestroy( stop );
free(mat1_h);
free(mat2_h);
free(mat_res_h);
cudaFree(mat1_d);
cudaFree(mat2_d);
cudaFree(mat_res_d);
}
/* Utility function to check for and report CUDA errors */
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
f87891f68001cffdfb20ae7746952cb06d3218ff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bonsai.h"
#undef USE_THRUST
#include "support_kernels.cu"
#include <stdio.h>
#include "../profiling/bonsai_timing.h"
PROF_MODULE(compute_propertiesD);
#include "node_specs.h"
static __device__ __forceinline__ void sh_MinMax2(int i, int j, float3 *r_min, float3 *r_max, volatile float3 *sh_rmin, volatile float3 *sh_rmax)
{
sh_rmin[i].x = (*r_min).x = fminf((*r_min).x, sh_rmin[j].x);
sh_rmin[i].y = (*r_min).y = fminf((*r_min).y, sh_rmin[j].y);
sh_rmin[i].z = (*r_min).z = fminf((*r_min).z, sh_rmin[j].z);
sh_rmax[i].x = (*r_max).x = fmaxf((*r_max).x, sh_rmax[j].x);
sh_rmax[i].y = (*r_max).y = fmaxf((*r_max).y, sh_rmax[j].y);
sh_rmax[i].z = (*r_max).z = fmaxf((*r_max).z, sh_rmax[j].z);
}
//////////////////////////////
//////////////////////////////
//////////////////////////////
//Helper functions for leaf-nodes
static __device__ void compute_monopole(double &mass, double &posx,
double &posy, double &posz,
float4 pos)
{
mass += pos.w;
posx += pos.w*pos.x;
posy += pos.w*pos.y;
posz += pos.w*pos.z;
}
static __device__ void compute_quadropole(double &oct_q11, double &oct_q22, double &oct_q33,
double &oct_q12, double &oct_q13, double &oct_q23,
float4 pos)
{
oct_q11 += pos.w * pos.x*pos.x;
oct_q22 += pos.w * pos.y*pos.y;
oct_q33 += pos.w * pos.z*pos.z;
oct_q12 += pos.w * pos.x*pos.y;
oct_q13 += pos.w * pos.y*pos.z;
oct_q23 += pos.w * pos.z*pos.x;
}
static __device__ void compute_bounds(float3 &r_min, float3 &r_max,
float4 pos)
{
r_min.x = fminf(r_min.x, pos.x);
r_min.y = fminf(r_min.y, pos.y);
r_min.z = fminf(r_min.z, pos.z);
r_max.x = fmaxf(r_max.x, pos.x);
r_max.y = fmaxf(r_max.y, pos.y);
r_max.z = fmaxf(r_max.z, pos.z);
}
//Non-leaf node helper functions
static __device__ void compute_monopole_node(double &mass, double &posx,
double &posy, double &posz,
double4 pos)
{
mass += pos.w;
posx += pos.w*pos.x;
posy += pos.w*pos.y;
posz += pos.w*pos.z;
}
static __device__ void compute_quadropole_node(double &oct_q11, double &oct_q22, double &oct_q33,
double &oct_q12, double &oct_q13, double &oct_q23,
double4 Q0, double4 Q1)
{
oct_q11 += Q0.x;
oct_q22 += Q0.y;
oct_q33 += Q0.z;
oct_q12 += Q1.x;
oct_q13 += Q1.y;
oct_q23 += Q1.z;
}
static __device__ void compute_bounds_node(float3 &r_min, float3 &r_max,
float4 node_min, float4 node_max)
{
r_min.x = fminf(r_min.x, node_min.x);
r_min.y = fminf(r_min.y, node_min.y);
r_min.z = fminf(r_min.z, node_min.z);
r_max.x = fmaxf(r_max.x, node_max.x);
r_max.y = fmaxf(r_max.y, node_max.y);
r_max.z = fmaxf(r_max.z, node_max.z);
}
KERNEL_DECLARE(compute_leaf)( const int n_leafs,
uint *leafsIdxs,
uint2 *node_bodies,
real4 *body_pos,
double4 *multipole,
real4 *nodeLowerBounds,
real4 *nodeUpperBounds,
real4 *body_vel,
uint *body_id) {
CUXTIMER("compute_leaf");
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint id = bid * blockDim.x + tid;
volatile __shared__ float3 shmem[256];
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&shmem[128];
//Set the shared memory for these threads and exit the thread
if (id >= n_leafs)
{
sh_rmin[tid].x = +1e10f; sh_rmin[tid].y = +1e10f; sh_rmin[tid].z = +1e10f;
sh_rmax[tid].x = -1e10f; sh_rmax[tid].y = -1e10f; sh_rmax[tid].z = -1e10f;
return;
}
//Since leaf nodes are intermixed with non-leaf nodes in the node_bodies array
//we get the leaf id from the leafsIdxs array
int nodeID = leafsIdxs[id];
const uint2 bij = node_bodies[nodeID];
const uint firstChild = bij.x & ILEVELMASK;
const uint lastChild = bij.y; //TODO maybe have to increase it by 1
//Variables holding properties and intermediate answers
float4 p;
double mass, posx, posy, posz;
mass = posx = posy = posz = 0.0;
double oct_q11, oct_q22, oct_q33;
double oct_q12, oct_q13, oct_q23;
oct_q11 = oct_q22 = oct_q33 = 0.0;
oct_q12 = oct_q13 = oct_q23 = 0.0;
float3 r_min, r_max;
r_min = make_float3(+1e10f, +1e10f, +1e10f);
r_max = make_float3(-1e10f, -1e10f, -1e10f);
//Loop over the children => particles => bodies
//unroll increases register usage #pragma unroll 16
float maxEps = -100.0f;
int count=0;
for(int i=firstChild; i < lastChild; i++)
{
p = body_pos[i];
maxEps = fmaxf(body_vel[i].w, maxEps); //Determine the max softening within this leaf
count++;
compute_monopole(mass, posx, posy, posz, p);
compute_quadropole(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23, p);
compute_bounds(r_min, r_max, p);
}
double4 mon = {posx, posy, posz, mass};
double im = 1.0/mon.w;
if(mon.w == 0) im = 0; //Allow tracer/massless particles
mon.x *= im;
mon.y *= im;
mon.z *= im;
//jbedorf, store darkMatterMass in Q1.w
//stellar mass is mon.w-darkMatterMass
double4 Q0, Q1;
Q0 = make_double4(oct_q11, oct_q22, oct_q33, maxEps); //Store max softening
Q1 = make_double4(oct_q12, oct_q13, oct_q23, 0.0f);
//Store the leaf properties
multipole[3*nodeID + 0] = mon; //Monopole
multipole[3*nodeID + 1] = Q0; //Quadropole
multipole[3*nodeID + 2] = Q1; //Quadropole
//Store the node boundaries
nodeLowerBounds[nodeID] = make_float4(r_min.x, r_min.y, r_min.z, 0.0f);
nodeUpperBounds[nodeID] = make_float4(r_max.x, r_max.y, r_max.z, 1.0f); //4th parameter is set to 1 to indicate this is a leaf
return;
}
//Function goes level by level (starting from deepest) and computes
//the properties of the non-leaf nodes
KERNEL_DECLARE(compute_non_leaf)(const int curLevel, //Level for which we calc
uint *leafsIdxs, //Conversion of ids
uint *node_level_list, //Contains the start nodes of each lvl
uint *n_children, //Reference from node to first child and number of childs
double4 *multipole,
real4 *nodeLowerBounds,
real4 *nodeUpperBounds){
CUXTIMER("compute_non_leaf");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = bid * (blockDim.x * blockDim.y) + tid;
const int endNode = node_level_list[curLevel];
const int startNode = node_level_list[curLevel-1];
if(idx >= (endNode-startNode)) return;
const int nodeID = leafsIdxs[idx + startNode];
//Get the children info
const uint firstChild = n_children[nodeID] & 0x0FFFFFFF; //TODO make this name/define?
const uint nChildren = ((n_children[nodeID] & 0xF0000000) >> 28); //TODO make this name/define?
//Variables
double mass, posx, posy, posz;
mass = posx = posy = posz = 0.0;
double oct_q11, oct_q22, oct_q33;
double oct_q12, oct_q13, oct_q23;
oct_q11 = oct_q22 = oct_q33 = 0.0;
oct_q12 = oct_q13 = oct_q23 = 0.0;
float3 r_min, r_max;
r_min = make_float3(+1e10f, +1e10f, +1e10f);
r_max = make_float3(-1e10f, -1e10f, -1e10f);
//Process the children (1 to 8)
float maxEps = -100.0f;
for(int i=firstChild; i < firstChild+nChildren; i++)
{
//Gogo process this data!
double4 tmon = multipole[3*i + 0];
maxEps = max(multipole[3*i + 1].w, maxEps);
compute_monopole_node(mass, posx, posy, posz, tmon);
compute_quadropole_node(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23,
multipole[3*i + 1], multipole[3*i + 2]);
compute_bounds_node(r_min, r_max, nodeLowerBounds[i], nodeUpperBounds[i]);
}
//Save the bounds
nodeLowerBounds[nodeID] = make_float4(r_min.x, r_min.y, r_min.z, 0.0f);
nodeUpperBounds[nodeID] = make_float4(r_max.x, r_max.y, r_max.z, 0.0f); //4th is set to 0 to indicate a non-leaf
//Regularize and store the results
double4 mon = {posx, posy, posz, mass};
double im = 1.0/mon.w;
if(mon.w == 0) im = 0; //Allow tracer/massless particles
mon.x *= im;
mon.y *= im;
mon.z *= im;
double4 Q0, Q1;
Q0 = make_double4(oct_q11, oct_q22, oct_q33, maxEps); //store max Eps
Q1 = make_double4(oct_q12, oct_q13, oct_q23, 0.0f);
multipole[3*nodeID + 0] = mon; //Monopole
multipole[3*nodeID + 1] = Q0; //Quadropole1
multipole[3*nodeID + 2] = Q1; //Quadropole2
return;
}
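//Converts the double-precision multipole moments to float, computes each node's
//bounding-box centre and size, and stores the squared opening criterion
//((l/theta + s)^2 with IMPBH, (l/theta)^2 otherwise) in boxCenterInfo.w;
//the sign is flipped for leaf nodes so the tree walk can tell them apart.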
KERNEL_DECLARE(compute_scaling)(const int node_count,
double4 *multipole,
real4 *nodeLowerBounds,
real4 *nodeUpperBounds,
uint *n_children,
real4 *multipoleF,
float theta,
real4 *boxSizeInfo,
real4 *boxCenterInfo,
uint2 *node_bodies){
CUXTIMER("compute_scaling");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = bid * (blockDim.x * blockDim.y) + tid;
if(idx >= node_count) return;
double4 monD, Q0, Q1;
monD = multipole[3*idx + 0]; //Monopole
Q0 = multipole[3*idx + 1]; //Quadropole1
Q1 = multipole[3*idx + 2]; //Quadropole2
//Scale the quadropole
double im = 1.0 / monD.w;
if(monD.w == 0) im = 0; //Allow tracer/massless particles
Q0.x = Q0.x*im - monD.x*monD.x;
Q0.y = Q0.y*im - monD.y*monD.y;
Q0.z = Q0.z*im - monD.z*monD.z;
Q1.x = Q1.x*im - monD.x*monD.y;
Q1.y = Q1.y*im - monD.y*monD.z;
Q1.z = Q1.z*im - monD.x*monD.z;
//Switch the y and z parameter
double temp = Q1.y;
Q1.y = Q1.z; Q1.z = temp;
//Convert the doubles to floats
float4 mon = make_float4(monD.x, monD.y, monD.z, monD.w);
multipoleF[3*idx + 0] = mon;
multipoleF[3*idx + 1] = make_float4(Q0.x, Q0.y, Q0.z, Q0.w); //Quadropole1
multipoleF[3*idx + 2] = make_float4(Q1.x, Q1.y, Q1.z, Q1.w); //Quadropole2
float4 r_min, r_max;
r_min = nodeLowerBounds[idx];
r_max = nodeUpperBounds[idx];
//Compute center and size of the box
float3 boxCenter;
boxCenter.x = 0.5*(r_min.x + r_max.x);
boxCenter.y = 0.5*(r_min.y + r_max.y);
boxCenter.z = 0.5*(r_min.z + r_max.z);
float3 boxSize = make_float3(fmaxf(fabs(boxCenter.x-r_min.x), fabs(boxCenter.x-r_max.x)),
fmaxf(fabs(boxCenter.y-r_min.y), fabs(boxCenter.y-r_max.y)),
fmaxf(fabs(boxCenter.z-r_min.z), fabs(boxCenter.z-r_max.z)));
//Calculate distance between center of the box and the center of mass
float3 s3 = make_float3((boxCenter.x - mon.x), (boxCenter.y - mon.y), (boxCenter.z - mon.z));
double s = sqrt((s3.x*s3.x) + (s3.y*s3.y) + (s3.z*s3.z));
//If mass-less particles form a node, the s would be huge in opening angle, make it 0
if(fabs(mon.w) < 1e-10) s = 0;
//Length of the box, note times 2 since we only computed half the distance before
float l = 2*fmaxf(boxSize.x, fmaxf(boxSize.y, boxSize.z));
//Store the box size and opening criteria
boxSizeInfo[idx].x = boxSize.x;
boxSizeInfo[idx].y = boxSize.y;
boxSizeInfo[idx].z = boxSize.z;
boxSizeInfo[idx].w = __int_as_float(n_children[idx]);
#if 1
boxCenterInfo[idx].x = boxCenter.x;
boxCenterInfo[idx].y = boxCenter.y;
boxCenterInfo[idx].z = boxCenter.z;
#else /* added by egaburov, see dev_approximate_gravity_warp.cu for matching code*/
boxCenterInfo[idx].x = mon.x;
boxCenterInfo[idx].y = mon.y;
boxCenterInfo[idx].z = mon.z;
#endif
//Extra check: it should not be necessary, but without it the leaf test below can fail,
//because 0.0 < 0 is false, whereas after forcing a minimum length it becomes e.g. -1e-12 < 0
if(l < 0.000001)
l = 0.000001;
#ifdef IMPBH
float cellOp = (l/theta) + s;
#else
//Minimum distance method
float cellOp = (l/theta);
#endif
cellOp = cellOp*cellOp;
uint2 bij = node_bodies[idx];
uint pfirst = bij.x & ILEVELMASK;
uint nchild = bij.y - pfirst;
//If this is (leaf)node with only 1 particle then we change
//the opening criteria to a large number to force that the
//leaf will be opened and the particle data is used
//instead of an approximation.
//This because sometimes (mass*pos)*(1.0/mass) != pos
//even in full double precision
if(nchild == 1)
{
cellOp = 10e10; //Force this node to be opened
}
if(r_max.w > 0)
{
cellOp = -cellOp; //This is a leaf node
}
boxCenterInfo[idx].w = cellOp;
//Change the indirections of the leaf nodes so they point to
//the particle data
bool leaf = (r_max.w > 0);
if(leaf)
{
pfirst = pfirst | ((nchild-1) << LEAFBIT);
boxSizeInfo[idx].w = __int_as_float(pfirst);
}
return;
}
//Compute the properties for the groups
KERNEL_DECLARE(gpu_setPHGroupData)(const int n_groups,
const int n_particles,
real4 *bodies_pos,
int2 *group_list,
real4 *groupCenterInfo,
real4 *groupSizeInfo){
CUXTIMER("setPHGroupData");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if(bid >= n_groups) return;
//Do a reduction on the particles assigned to this group
volatile __shared__ float3 shmem[2*NCRIT];
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&shmem[NCRIT];
float3 r_min = make_float3(+1e10f, +1e10f, +1e10f);
float3 r_max = make_float3(-1e10f, -1e10f, -1e10f);
int start = group_list[bid].x;
int end = group_list[bid].y;
int partIdx = start + threadIdx.x;
//Set the shared memory with the data
if (partIdx >= end)
{
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
}
else
{
sh_rmin[tid].x = r_min.x = bodies_pos[partIdx].x; sh_rmin[tid].y = r_min.y = bodies_pos[partIdx].y; sh_rmin[tid].z = r_min.z = bodies_pos[partIdx].z;
sh_rmax[tid].x = r_max.x = bodies_pos[partIdx].x; sh_rmax[tid].y = r_max.y = bodies_pos[partIdx].y; sh_rmax[tid].z = r_max.z = bodies_pos[partIdx].z;
}
__syncthreads();
// do reduction in shared mem
if(blockDim.x >= 512) if (tid < 256) {sh_MinMax2(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 256) if (tid < 128) {sh_MinMax2(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 128) if (tid < 64) {sh_MinMax2(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 64) if (tid < 32) {sh_MinMax2(tid, tid + 32, &r_min, &r_max, sh_rmin, sh_rmax); }
if(blockDim.x >= 32) if (tid < 16) { sh_MinMax2(tid, tid + 16, &r_min, &r_max, sh_rmin, sh_rmax); }
if(tid < 8)
{
sh_MinMax2(tid, tid + 8, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 4, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 2, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 1, &r_min, &r_max, sh_rmin, sh_rmax);
}
// write result for this block to global mem
if (tid == 0)
{
//Compute the group center and size
float3 grpCenter;
grpCenter.x = 0.5*(r_min.x + r_max.x);
grpCenter.y = 0.5*(r_min.y + r_max.y);
grpCenter.z = 0.5*(r_min.z + r_max.z);
float3 grpSize = make_float3(fmaxf(fabs(grpCenter.x-r_min.x), fabs(grpCenter.x-r_max.x)),
fmaxf(fabs(grpCenter.y-r_min.y), fabs(grpCenter.y-r_max.y)),
fmaxf(fabs(grpCenter.z-r_min.z), fabs(grpCenter.z-r_max.z)));
//Store the box size and opening criteria
groupSizeInfo[bid].x = grpSize.x;
groupSizeInfo[bid].y = grpSize.y;
groupSizeInfo[bid].z = grpSize.z;
int nchild = end-start;
start = start | (nchild-1) << CRITBIT;
groupSizeInfo[bid].w = __int_as_float(start);
float l = max(grpSize.x, max(grpSize.y, grpSize.z));
groupCenterInfo[bid].x = grpCenter.x;
groupCenterInfo[bid].y = grpCenter.y;
groupCenterInfo[bid].z = grpCenter.z;
//Test stats for physical group size
groupCenterInfo[bid].w = l;
} //end tid == 0
}//end copyNode2grp
//Compute the properties for the groups
KERNEL_DECLARE(gpu_setPHGroupDataGetKey)(const int n_groups,
const int n_particles,
real4 *bodies_pos,
int2 *group_list,
real4 *groupCenterInfo,
real4 *groupSizeInfo,
uint4 *body_key,
float4 corner){
CUXTIMER("setPHGroupDataGetKey");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if(bid >= n_groups) return;
//Do a reduction on the particles assigned to this group
volatile __shared__ float3 shmem[2*NCRIT];
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&shmem[NCRIT];
float3 r_min = make_float3(+1e10f, +1e10f, +1e10f);
float3 r_max = make_float3(-1e10f, -1e10f, -1e10f);
int start = group_list[bid].x;
int end = group_list[bid].y;
int partIdx = start + threadIdx.x;
//Set the shared memory with the data
if (partIdx >= end)
{
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
}
else
{
sh_rmin[tid].x = r_min.x = bodies_pos[partIdx].x; sh_rmin[tid].y = r_min.y = bodies_pos[partIdx].y; sh_rmin[tid].z = r_min.z = bodies_pos[partIdx].z;
sh_rmax[tid].x = r_max.x = bodies_pos[partIdx].x; sh_rmax[tid].y = r_max.y = bodies_pos[partIdx].y; sh_rmax[tid].z = r_max.z = bodies_pos[partIdx].z;
}
__syncthreads();
// do reduction in shared mem
if(blockDim.x >= 512) if (tid < 256) {sh_MinMax2(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 256) if (tid < 128) {sh_MinMax2(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 128) if (tid < 64) {sh_MinMax2(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 64) if (tid < 32) {sh_MinMax2(tid, tid + 32, &r_min, &r_max, sh_rmin, sh_rmax); }
if(blockDim.x >= 32) if (tid < 16) { sh_MinMax2(tid, tid + 16, &r_min, &r_max, sh_rmin, sh_rmax); }
if(tid < 8)
{
sh_MinMax2(tid, tid + 8, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 4, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 2, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 1, &r_min, &r_max, sh_rmin, sh_rmax);
}
// write result for this block to global mem
if (tid == 0)
{
//Compute the group center and size
float3 grpCenter;
grpCenter.x = 0.5*(r_min.x + r_max.x);
grpCenter.y = 0.5*(r_min.y + r_max.y);
grpCenter.z = 0.5*(r_min.z + r_max.z);
float3 grpSize = make_float3(fmaxf(fabs(grpCenter.x-r_min.x), fabs(grpCenter.x-r_max.x)),
fmaxf(fabs(grpCenter.y-r_min.y), fabs(grpCenter.y-r_max.y)),
fmaxf(fabs(grpCenter.z-r_min.z), fabs(grpCenter.z-r_max.z)));
//Store the box size and opening criteria
groupSizeInfo[bid].x = grpSize.x;
groupSizeInfo[bid].y = grpSize.y;
groupSizeInfo[bid].z = grpSize.z;
real4 pos = bodies_pos[start];
int nchild = end-start;
start = start | (nchild-1) << CRITBIT;
groupSizeInfo[bid].w = __int_as_float(start);
float l = max(grpSize.x, max(grpSize.y, grpSize.z));
groupCenterInfo[bid].x = grpCenter.x;
groupCenterInfo[bid].y = grpCenter.y;
groupCenterInfo[bid].z = grpCenter.z;
//Test stats for physical group size
groupCenterInfo[bid].w = l;
int4 crd;
real domain_fac = corner.w;
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
body_key[bid] = get_key(crd);
} //end tid == 0
}//end copyNode2grp
//Compute the key for the groups
KERNEL_DECLARE(gpu_setPHGroupDataGetKey2)(const int n_groups,
real4 *bodies_pos,
int2 *group_list,
uint4 *body_key,
float4 corner){
CUXTIMER("setPHGroupDataGetKey2");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = bid * (blockDim.x * blockDim.y) + tid;
if(idx >= n_groups) return;
int start = group_list[idx].x;
real4 pos = bodies_pos[start];
// int end = group_list[idx].y-1;
// real4 pos = bodies_pos[end];
// int end = group_list[idx].y-1;
// int start = group_list[idx].x;
// start = (end+start) / 2;
// real4 pos = bodies_pos[start];
int4 crd;
real domain_fac = corner.w;
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
// uint2 key = get_key_morton(crd);
// body_key[idx] = make_uint4(key.x, key.y, 0,0);
body_key[idx] = get_key(crd); //has to be PH key in order to prevent the need for sorting
}//end copyNode2grp
|
f87891f68001cffdfb20ae7746952cb06d3218ff.cu
|
#include "bonsai.h"
#undef USE_THRUST
#include "support_kernels.cu"
#include <stdio.h>
#include "../profiling/bonsai_timing.h"
PROF_MODULE(compute_propertiesD);
#include "node_specs.h"
static __device__ __forceinline__ void sh_MinMax2(int i, int j, float3 *r_min, float3 *r_max, volatile float3 *sh_rmin, volatile float3 *sh_rmax)
{
sh_rmin[i].x = (*r_min).x = fminf((*r_min).x, sh_rmin[j].x);
sh_rmin[i].y = (*r_min).y = fminf((*r_min).y, sh_rmin[j].y);
sh_rmin[i].z = (*r_min).z = fminf((*r_min).z, sh_rmin[j].z);
sh_rmax[i].x = (*r_max).x = fmaxf((*r_max).x, sh_rmax[j].x);
sh_rmax[i].y = (*r_max).y = fmaxf((*r_max).y, sh_rmax[j].y);
sh_rmax[i].z = (*r_max).z = fmaxf((*r_max).z, sh_rmax[j].z);
}
//////////////////////////////
//////////////////////////////
//////////////////////////////
//Helper functions for leaf-nodes
static __device__ void compute_monopole(double &mass, double &posx,
double &posy, double &posz,
float4 pos)
{
mass += pos.w;
posx += pos.w*pos.x;
posy += pos.w*pos.y;
posz += pos.w*pos.z;
}
static __device__ void compute_quadropole(double &oct_q11, double &oct_q22, double &oct_q33,
double &oct_q12, double &oct_q13, double &oct_q23,
float4 pos)
{
oct_q11 += pos.w * pos.x*pos.x;
oct_q22 += pos.w * pos.y*pos.y;
oct_q33 += pos.w * pos.z*pos.z;
oct_q12 += pos.w * pos.x*pos.y;
oct_q13 += pos.w * pos.y*pos.z;
oct_q23 += pos.w * pos.z*pos.x;
}
static __device__ void compute_bounds(float3 &r_min, float3 &r_max,
float4 pos)
{
r_min.x = fminf(r_min.x, pos.x);
r_min.y = fminf(r_min.y, pos.y);
r_min.z = fminf(r_min.z, pos.z);
r_max.x = fmaxf(r_max.x, pos.x);
r_max.y = fmaxf(r_max.y, pos.y);
r_max.z = fmaxf(r_max.z, pos.z);
}
//Non-leaf node helper functions
static __device__ void compute_monopole_node(double &mass, double &posx,
double &posy, double &posz,
double4 pos)
{
mass += pos.w;
posx += pos.w*pos.x;
posy += pos.w*pos.y;
posz += pos.w*pos.z;
}
static __device__ void compute_quadropole_node(double &oct_q11, double &oct_q22, double &oct_q33,
double &oct_q12, double &oct_q13, double &oct_q23,
double4 Q0, double4 Q1)
{
oct_q11 += Q0.x;
oct_q22 += Q0.y;
oct_q33 += Q0.z;
oct_q12 += Q1.x;
oct_q13 += Q1.y;
oct_q23 += Q1.z;
}
static __device__ void compute_bounds_node(float3 &r_min, float3 &r_max,
float4 node_min, float4 node_max)
{
r_min.x = fminf(r_min.x, node_min.x);
r_min.y = fminf(r_min.y, node_min.y);
r_min.z = fminf(r_min.z, node_min.z);
r_max.x = fmaxf(r_max.x, node_max.x);
r_max.y = fmaxf(r_max.y, node_max.y);
r_max.z = fmaxf(r_max.z, node_max.z);
}
KERNEL_DECLARE(compute_leaf)( const int n_leafs,
uint *leafsIdxs,
uint2 *node_bodies,
real4 *body_pos,
double4 *multipole,
real4 *nodeLowerBounds,
real4 *nodeUpperBounds,
real4 *body_vel,
uint *body_id) {
CUXTIMER("compute_leaf");
const uint bid = blockIdx.y * gridDim.x + blockIdx.x;
const uint tid = threadIdx.x;
const uint id = bid * blockDim.x + tid;
volatile __shared__ float3 shmem[256];
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&shmem[128];
//Set the shared memory for these threads and exit the thread
if (id >= n_leafs)
{
sh_rmin[tid].x = +1e10f; sh_rmin[tid].y = +1e10f; sh_rmin[tid].z = +1e10f;
sh_rmax[tid].x = -1e10f; sh_rmax[tid].y = -1e10f; sh_rmax[tid].z = -1e10f;
return;
}
//Since leaf nodes are intermixed with non-leaf nodes in the node_bodies array
//we get the leaf id from the leafsIdxs array
int nodeID = leafsIdxs[id];
const uint2 bij = node_bodies[nodeID];
const uint firstChild = bij.x & ILEVELMASK;
const uint lastChild = bij.y; //TODO maybe have to increase it by 1
//Variables holding properties and intermediate answers
float4 p;
double mass, posx, posy, posz;
mass = posx = posy = posz = 0.0;
double oct_q11, oct_q22, oct_q33;
double oct_q12, oct_q13, oct_q23;
oct_q11 = oct_q22 = oct_q33 = 0.0;
oct_q12 = oct_q13 = oct_q23 = 0.0;
float3 r_min, r_max;
r_min = make_float3(+1e10f, +1e10f, +1e10f);
r_max = make_float3(-1e10f, -1e10f, -1e10f);
//Loop over the children => particles => bodies
//unroll increases register usage #pragma unroll 16
float maxEps = -100.0f;
int count=0;
for(int i=firstChild; i < lastChild; i++)
{
p = body_pos[i];
maxEps = fmaxf(body_vel[i].w, maxEps); //Determine the max softening within this leaf
count++;
compute_monopole(mass, posx, posy, posz, p);
compute_quadropole(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23, p);
compute_bounds(r_min, r_max, p);
}
double4 mon = {posx, posy, posz, mass};
double im = 1.0/mon.w;
if(mon.w == 0) im = 0; //Allow tracer/massless particles
mon.x *= im;
mon.y *= im;
mon.z *= im;
//jbedorf, store darkMatterMass in Q1.w
//stellar mass is mon.w-darkMatterMass
double4 Q0, Q1;
Q0 = make_double4(oct_q11, oct_q22, oct_q33, maxEps); //Store max softening
Q1 = make_double4(oct_q12, oct_q13, oct_q23, 0.0f);
//Store the leaf properties
multipole[3*nodeID + 0] = mon; //Monopole
multipole[3*nodeID + 1] = Q0; //Quadropole
multipole[3*nodeID + 2] = Q1; //Quadropole
//Store the node boundaries
nodeLowerBounds[nodeID] = make_float4(r_min.x, r_min.y, r_min.z, 0.0f);
nodeUpperBounds[nodeID] = make_float4(r_max.x, r_max.y, r_max.z, 1.0f); //4th parameter is set to 1 to indicate this is a leaf
return;
}
//Function goes level by level (starting from deepest) and computes
//the properties of the non-leaf nodes
KERNEL_DECLARE(compute_non_leaf)(const int curLevel, //Level for which we calc
uint *leafsIdxs, //Conversion of ids
uint *node_level_list, //Contains the start nodes of each lvl
uint *n_children, //Reference from node to first child and number of childs
double4 *multipole,
real4 *nodeLowerBounds,
real4 *nodeUpperBounds){
CUXTIMER("compute_non_leaf");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = bid * (blockDim.x * blockDim.y) + tid;
const int endNode = node_level_list[curLevel];
const int startNode = node_level_list[curLevel-1];
if(idx >= (endNode-startNode)) return;
const int nodeID = leafsIdxs[idx + startNode];
//Get the children info
const uint firstChild = n_children[nodeID] & 0x0FFFFFFF; //TODO make this name/define?
const uint nChildren = ((n_children[nodeID] & 0xF0000000) >> 28); //TODO make this name/define?
//Variables
double mass, posx, posy, posz;
mass = posx = posy = posz = 0.0;
double oct_q11, oct_q22, oct_q33;
double oct_q12, oct_q13, oct_q23;
oct_q11 = oct_q22 = oct_q33 = 0.0;
oct_q12 = oct_q13 = oct_q23 = 0.0;
float3 r_min, r_max;
r_min = make_float3(+1e10f, +1e10f, +1e10f);
r_max = make_float3(-1e10f, -1e10f, -1e10f);
//Process the children (1 to 8)
float maxEps = -100.0f;
for(int i=firstChild; i < firstChild+nChildren; i++)
{
//Gogo process this data!
double4 tmon = multipole[3*i + 0];
maxEps = max(multipole[3*i + 1].w, maxEps);
compute_monopole_node(mass, posx, posy, posz, tmon);
compute_quadropole_node(oct_q11, oct_q22, oct_q33, oct_q12, oct_q13, oct_q23,
multipole[3*i + 1], multipole[3*i + 2]);
compute_bounds_node(r_min, r_max, nodeLowerBounds[i], nodeUpperBounds[i]);
}
//Save the bounds
nodeLowerBounds[nodeID] = make_float4(r_min.x, r_min.y, r_min.z, 0.0f);
nodeUpperBounds[nodeID] = make_float4(r_max.x, r_max.y, r_max.z, 0.0f); //4th is set to 0 to indicate a non-leaf
//Regularize and store the results
double4 mon = {posx, posy, posz, mass};
double im = 1.0/mon.w;
if(mon.w == 0) im = 0; //Allow tracer/massless particles
mon.x *= im;
mon.y *= im;
mon.z *= im;
double4 Q0, Q1;
Q0 = make_double4(oct_q11, oct_q22, oct_q33, maxEps); //store max Eps
Q1 = make_double4(oct_q12, oct_q13, oct_q23, 0.0f);
multipole[3*nodeID + 0] = mon; //Monopole
multipole[3*nodeID + 1] = Q0; //Quadropole1
multipole[3*nodeID + 2] = Q1; //Quadropole2
return;
}
KERNEL_DECLARE(compute_scaling)(const int node_count,
double4 *multipole,
real4 *nodeLowerBounds,
real4 *nodeUpperBounds,
uint *n_children,
real4 *multipoleF,
float theta,
real4 *boxSizeInfo,
real4 *boxCenterInfo,
uint2 *node_bodies){
CUXTIMER("compute_scaling");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = bid * (blockDim.x * blockDim.y) + tid;
if(idx >= node_count) return;
double4 monD, Q0, Q1;
monD = multipole[3*idx + 0]; //Monopole
Q0 = multipole[3*idx + 1]; //Quadropole1
Q1 = multipole[3*idx + 2]; //Quadropole2
//Scale the quadropole
double im = 1.0 / monD.w;
if(monD.w == 0) im = 0; //Allow tracer/massless particles
Q0.x = Q0.x*im - monD.x*monD.x;
Q0.y = Q0.y*im - monD.y*monD.y;
Q0.z = Q0.z*im - monD.z*monD.z;
Q1.x = Q1.x*im - monD.x*monD.y;
Q1.y = Q1.y*im - monD.y*monD.z;
Q1.z = Q1.z*im - monD.x*monD.z;
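  //Q0/Q1 now hold the mass-normalised central second moments about the centre of
  //mass: Q_ij = (raw second moment)/M - com_i*com_j.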
//Switch the y and z parameter
double temp = Q1.y;
Q1.y = Q1.z; Q1.z = temp;
//Convert the doubles to floats
float4 mon = make_float4(monD.x, monD.y, monD.z, monD.w);
multipoleF[3*idx + 0] = mon;
multipoleF[3*idx + 1] = make_float4(Q0.x, Q0.y, Q0.z, Q0.w); //Quadropole1
multipoleF[3*idx + 2] = make_float4(Q1.x, Q1.y, Q1.z, Q1.w); //Quadropole2
float4 r_min, r_max;
r_min = nodeLowerBounds[idx];
r_max = nodeUpperBounds[idx];
//Compute center and size of the box
float3 boxCenter;
boxCenter.x = 0.5*(r_min.x + r_max.x);
boxCenter.y = 0.5*(r_min.y + r_max.y);
boxCenter.z = 0.5*(r_min.z + r_max.z);
float3 boxSize = make_float3(fmaxf(fabs(boxCenter.x-r_min.x), fabs(boxCenter.x-r_max.x)),
fmaxf(fabs(boxCenter.y-r_min.y), fabs(boxCenter.y-r_max.y)),
fmaxf(fabs(boxCenter.z-r_min.z), fabs(boxCenter.z-r_max.z)));
//Calculate distance between center of the box and the center of mass
float3 s3 = make_float3((boxCenter.x - mon.x), (boxCenter.y - mon.y), (boxCenter.z - mon.z));
double s = sqrt((s3.x*s3.x) + (s3.y*s3.y) + (s3.z*s3.z));
//If mass-less particles form a node, the s would be huge in opening angle, make it 0
if(fabs(mon.w) < 1e-10) s = 0;
//Length of the box, note times 2 since we only computed half the distance before
float l = 2*fmaxf(boxSize.x, fmaxf(boxSize.y, boxSize.z));
//Store the box size and opening criteria
boxSizeInfo[idx].x = boxSize.x;
boxSizeInfo[idx].y = boxSize.y;
boxSizeInfo[idx].z = boxSize.z;
boxSizeInfo[idx].w = __int_as_float(n_children[idx]);
#if 1
boxCenterInfo[idx].x = boxCenter.x;
boxCenterInfo[idx].y = boxCenter.y;
boxCenterInfo[idx].z = boxCenter.z;
#else /* added by egaburov, see dev_approximate_gravity_warp.cu for matching code*/
boxCenterInfo[idx].x = mon.x;
boxCenterInfo[idx].y = mon.y;
boxCenterInfo[idx].z = mon.z;
#endif
  //Extra check: it shouldn't be necessary, but it is, otherwise the leaf test can fail.
  //Without the clamp the comparison can become 0.0 < 0 (false); with it, roughly -1e-12 < 0.
if(l < 0.000001)
l = 0.000001;
#ifdef IMPBH
float cellOp = (l/theta) + s;
#else
//Minimum distance method
float cellOp = (l/theta);
#endif
cellOp = cellOp*cellOp;
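  //cellOp holds the squared opening radius: during the tree walk a cell is
  //approximated by its multipole only if the squared distance to it exceeds this
  //value. Its sign (set below) additionally encodes leaf vs internal node.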
uint2 bij = node_bodies[idx];
uint pfirst = bij.x & ILEVELMASK;
uint nchild = bij.y - pfirst;
//If this is (leaf)node with only 1 particle then we change
//the opening criteria to a large number to force that the
//leaf will be opened and the particle data is used
//instead of an approximation.
//This because sometimes (mass*pos)*(1.0/mass) != pos
//even in full double precision
if(nchild == 1)
{
cellOp = 10e10; //Force this node to be opened
}
if(r_max.w > 0)
{
cellOp = -cellOp; //This is a leaf node
}
boxCenterInfo[idx].w = cellOp;
//Change the indirections of the leaf nodes so they point to
//the particle data
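  //For leaf nodes boxSizeInfo.w is repacked to reference the particle data:
  //low bits = index of the first body, bits from LEAFBIT up = (body count - 1).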
bool leaf = (r_max.w > 0);
if(leaf)
{
pfirst = pfirst | ((nchild-1) << LEAFBIT);
boxSizeInfo[idx].w = __int_as_float(pfirst);
}
return;
}
//Compute the properties for the groups
KERNEL_DECLARE(gpu_setPHGroupData)(const int n_groups,
const int n_particles,
real4 *bodies_pos,
int2 *group_list,
real4 *groupCenterInfo,
real4 *groupSizeInfo){
CUXTIMER("setPHGroupData");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if(bid >= n_groups) return;
//Do a reduction on the particles assigned to this group
volatile __shared__ float3 shmem[2*NCRIT];
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&shmem[NCRIT];
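  //Shared-memory layout for the min/max reduction: the first NCRIT float3 slots
  //hold per-thread minima, the next NCRIT slots the corresponding maxima.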
float3 r_min = make_float3(+1e10f, +1e10f, +1e10f);
float3 r_max = make_float3(-1e10f, -1e10f, -1e10f);
int start = group_list[bid].x;
int end = group_list[bid].y;
int partIdx = start + threadIdx.x;
//Set the shared memory with the data
if (partIdx >= end)
{
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
}
else
{
sh_rmin[tid].x = r_min.x = bodies_pos[partIdx].x; sh_rmin[tid].y = r_min.y = bodies_pos[partIdx].y; sh_rmin[tid].z = r_min.z = bodies_pos[partIdx].z;
sh_rmax[tid].x = r_max.x = bodies_pos[partIdx].x; sh_rmax[tid].y = r_max.y = bodies_pos[partIdx].y; sh_rmax[tid].z = r_max.z = bodies_pos[partIdx].z;
}
__syncthreads();
// do reduction in shared mem
if(blockDim.x >= 512) if (tid < 256) {sh_MinMax2(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 256) if (tid < 128) {sh_MinMax2(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 128) if (tid < 64) {sh_MinMax2(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 64) if (tid < 32) {sh_MinMax2(tid, tid + 32, &r_min, &r_max, sh_rmin, sh_rmax); }
if(blockDim.x >= 32) if (tid < 16) { sh_MinMax2(tid, tid + 16, &r_min, &r_max, sh_rmin, sh_rmax); }
if(tid < 8)
{
sh_MinMax2(tid, tid + 8, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 4, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 2, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 1, &r_min, &r_max, sh_rmin, sh_rmax);
}
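  //The final reduction steps omit __syncthreads(): they rely on warp-synchronous
  //execution within a single warp, which is why the shared-memory pointers are volatile.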
// write result for this block to global mem
if (tid == 0)
{
//Compute the group center and size
float3 grpCenter;
grpCenter.x = 0.5*(r_min.x + r_max.x);
grpCenter.y = 0.5*(r_min.y + r_max.y);
grpCenter.z = 0.5*(r_min.z + r_max.z);
float3 grpSize = make_float3(fmaxf(fabs(grpCenter.x-r_min.x), fabs(grpCenter.x-r_max.x)),
fmaxf(fabs(grpCenter.y-r_min.y), fabs(grpCenter.y-r_max.y)),
fmaxf(fabs(grpCenter.z-r_min.z), fabs(grpCenter.z-r_max.z)));
//Store the box size and opening criteria
groupSizeInfo[bid].x = grpSize.x;
groupSizeInfo[bid].y = grpSize.y;
groupSizeInfo[bid].z = grpSize.z;
int nchild = end-start;
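    //Pack the particle range of this group into groupSizeInfo.w: first particle
    //index in the low bits, (particle count - 1) shifted up by CRITBIT.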
start = start | (nchild-1) << CRITBIT;
groupSizeInfo[bid].w = __int_as_float(start);
float l = max(grpSize.x, max(grpSize.y, grpSize.z));
groupCenterInfo[bid].x = grpCenter.x;
groupCenterInfo[bid].y = grpCenter.y;
groupCenterInfo[bid].z = grpCenter.z;
//Test stats for physical group size
groupCenterInfo[bid].w = l;
} //end tid == 0
}//end copyNode2grp
//Compute the properties for the groups
KERNEL_DECLARE(gpu_setPHGroupDataGetKey)(const int n_groups,
const int n_particles,
real4 *bodies_pos,
int2 *group_list,
real4 *groupCenterInfo,
real4 *groupSizeInfo,
uint4 *body_key,
float4 corner){
CUXTIMER("setPHGroupDataGetKey");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if(bid >= n_groups) return;
//Do a reduction on the particles assigned to this group
volatile __shared__ float3 shmem[2*NCRIT];
volatile float3 *sh_rmin = (float3*)&shmem [ 0];
volatile float3 *sh_rmax = (float3*)&shmem[NCRIT];
float3 r_min = make_float3(+1e10f, +1e10f, +1e10f);
float3 r_max = make_float3(-1e10f, -1e10f, -1e10f);
int start = group_list[bid].x;
int end = group_list[bid].y;
int partIdx = start + threadIdx.x;
//Set the shared memory with the data
if (partIdx >= end)
{
sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z;
sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z;
}
else
{
sh_rmin[tid].x = r_min.x = bodies_pos[partIdx].x; sh_rmin[tid].y = r_min.y = bodies_pos[partIdx].y; sh_rmin[tid].z = r_min.z = bodies_pos[partIdx].z;
sh_rmax[tid].x = r_max.x = bodies_pos[partIdx].x; sh_rmax[tid].y = r_max.y = bodies_pos[partIdx].y; sh_rmax[tid].z = r_max.z = bodies_pos[partIdx].z;
}
__syncthreads();
// do reduction in shared mem
if(blockDim.x >= 512) if (tid < 256) {sh_MinMax2(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 256) if (tid < 128) {sh_MinMax2(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 128) if (tid < 64) {sh_MinMax2(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads();
if(blockDim.x >= 64) if (tid < 32) {sh_MinMax2(tid, tid + 32, &r_min, &r_max, sh_rmin, sh_rmax); }
if(blockDim.x >= 32) if (tid < 16) { sh_MinMax2(tid, tid + 16, &r_min, &r_max, sh_rmin, sh_rmax); }
if(tid < 8)
{
sh_MinMax2(tid, tid + 8, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 4, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 2, &r_min, &r_max, sh_rmin, sh_rmax);
sh_MinMax2(tid, tid + 1, &r_min, &r_max, sh_rmin, sh_rmax);
}
// write result for this block to global mem
if (tid == 0)
{
//Compute the group center and size
float3 grpCenter;
grpCenter.x = 0.5*(r_min.x + r_max.x);
grpCenter.y = 0.5*(r_min.y + r_max.y);
grpCenter.z = 0.5*(r_min.z + r_max.z);
float3 grpSize = make_float3(fmaxf(fabs(grpCenter.x-r_min.x), fabs(grpCenter.x-r_max.x)),
fmaxf(fabs(grpCenter.y-r_min.y), fabs(grpCenter.y-r_max.y)),
fmaxf(fabs(grpCenter.z-r_min.z), fabs(grpCenter.z-r_max.z)));
//Store the box size and opening criteria
groupSizeInfo[bid].x = grpSize.x;
groupSizeInfo[bid].y = grpSize.y;
groupSizeInfo[bid].z = grpSize.z;
real4 pos = bodies_pos[start];
int nchild = end-start;
start = start | (nchild-1) << CRITBIT;
groupSizeInfo[bid].w = __int_as_float(start);
float l = max(grpSize.x, max(grpSize.y, grpSize.z));
groupCenterInfo[bid].x = grpCenter.x;
groupCenterInfo[bid].y = grpCenter.y;
groupCenterInfo[bid].z = grpCenter.z;
//Test stats for physical group size
groupCenterInfo[bid].w = l;
int4 crd;
real domain_fac = corner.w;
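    //Quantise the representative position to integer grid coordinates relative to
    //the domain corner (cell size corner.w); get_key() then returns the
    //Peano-Hilbert key used to order the groups.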
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
body_key[bid] = get_key(crd);
} //end tid == 0
}//end copyNode2grp
//Compute the key for the groups
KERNEL_DECLARE(gpu_setPHGroupDataGetKey2)(const int n_groups,
real4 *bodies_pos,
int2 *group_list,
uint4 *body_key,
float4 corner){
CUXTIMER("setPHGroupDataGetKey2");
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int idx = bid * (blockDim.x * blockDim.y) + tid;
if(idx >= n_groups) return;
int start = group_list[idx].x;
real4 pos = bodies_pos[start];
// int end = group_list[idx].y-1;
// real4 pos = bodies_pos[end];
// int end = group_list[idx].y-1;
// int start = group_list[idx].x;
// start = (end+start) / 2;
// real4 pos = bodies_pos[start];
int4 crd;
real domain_fac = corner.w;
#ifndef EXACT_KEY
crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac));
crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac));
crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac));
#else
crd.x = (int)((pos.x - corner.x) / domain_fac);
crd.y = (int)((pos.y - corner.y) / domain_fac);
crd.z = (int)((pos.z - corner.z) / domain_fac);
#endif
// uint2 key = get_key_morton(crd);
// body_key[idx] = make_uint4(key.x, key.y, 0,0);
body_key[idx] = get_key(crd); //has to be PH key in order to prevent the need for sorting
}//end copyNode2grp
|
9e7fb773729a13cc4161eff281bb7743c4f8e858.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/sync_batch_norm_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/sync_batch_norm_utils.h"
namespace phi {
template <typename T, typename Context>
void SyncBatchNormKernel(const Context &ctx,
const DenseTensor &x,
const DenseTensor &mean,
const DenseTensor &variance,
const DenseTensor &scale,
const DenseTensor &bias,
bool is_test,
float momentum,
float epsilon_f,
const std::string &data_layout_str,
bool use_global_stats,
bool trainable_statistics,
DenseTensor *y,
DenseTensor *mean_out,
DenseTensor *variance_out,
DenseTensor *saved_mean,
DenseTensor *saved_variance,
DenseTensor *reserve_space) {
PADDLE_ENFORCE_EQ(use_global_stats,
false,
phi::errors::InvalidArgument(
"sync_batch_norm doesn't support "
"to set use_global_stats True. Please use batch_norm "
"in this case."));
double epsilon = epsilon_f;
const bool trainable_stats = trainable_statistics;
const DataLayout layout = phi::StringToDataLayout(data_layout_str);
bool test_mode = is_test && (!trainable_statistics);
const auto &x_dims = x.dims();
PADDLE_ENFORCE_GE(x_dims.size(),
2,
phi::errors::InvalidArgument(
"The Input dim size should be larger than 1."));
PADDLE_ENFORCE_LE(x_dims.size(),
5,
phi::errors::InvalidArgument(
"The Input dim size should be less than 6."));
int N, C, H, W, D;
funcs::ExtractNCWHD(x_dims, layout, &N, &C, &H, &W, &D);
int x_numel = x.numel();
const T *x_d = x.template data<T>();
const auto *s_d = scale.template data<BatchNormParamType<T>>();
const auto *b_d = bias.template data<BatchNormParamType<T>>();
T *y_d = ctx.template Alloc<T>(y);
const BatchNormParamType<T> *mean_data = nullptr;
const BatchNormParamType<T> *var_data = nullptr;
auto stream = ctx.stream();
const int block = 512;
int max_threads = ctx.GetMaxPhysicalThreadCount();
paddle::memory::AllocationPtr alloc_ptr{nullptr};
if (test_mode) {
mean_data = mean.template data<BatchNormParamType<T>>();
var_data = variance.template data<BatchNormParamType<T>>();
} else {
// x, x^2, 1, here 1 is used to calc device num
// device num also can be got from platform::DeviceContextPool
const int bytes = (C * 2 + 1) * sizeof(BatchNormParamType<T>);
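    // Buffer layout: [0, C) per-channel statistics of x, [C, 2C) of x^2, plus one
    // trailing entry set to 1 on each device so the all-reduce below also yields the
    // number of participating devices.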
alloc_ptr = phi::memory_utils::Alloc(
ctx.GetPlace(),
bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(ctx.stream())));
auto *stats = reinterpret_cast<BatchNormParamType<T> *>(alloc_ptr->ptr());
const int threads = 256;
int grid = ::min(C, (max_threads + threads - 1) / threads);
if (layout == phi::DataLayout::kNCHW) {
hipLaunchKernelGGL(( KeLocalStats<T, threads, phi::DataLayout::kNCHW>)
, dim3(grid), dim3(threads), 0, stream, x_d, N, H * W * D, C, stats);
} else {
hipLaunchKernelGGL(( KeLocalStats<T, threads, phi::DataLayout::kNHWC>)
, dim3(grid), dim3(threads), 0, stream, x_d, N, H * W * D, C, stats);
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
ncclComm_t comm = static_cast<ncclComm_t>(detail::GetCCLComm(x.place(), 0));
if (comm == nullptr) {
comm = ctx.nccl_comm();
}
if (comm) {
int dtype = paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(mean_out->dtype()));
// In-place operation
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
stats,
stats,
2 * C + 1,
static_cast<ncclDataType_t>(dtype),
ncclSum,
comm,
stream));
VLOG(3) << "Sync result using all reduce";
}
#endif
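    // When built with NCCL/RCCL the in-place all-reduce above has summed the 2*C+1
    // local statistics, so every device now derives identical global mean and
    // inverse variance in KeSyncAndMovingStats and updates the moving statistics.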
auto *est_mean_data = ctx.template Alloc<BatchNormParamType<T>>(mean_out);
auto *est_var_data =
ctx.template Alloc<BatchNormParamType<T>>(variance_out);
auto *sv_mean_data = ctx.template Alloc<BatchNormParamType<T>>(saved_mean);
auto *sv_inv_var_data =
ctx.template Alloc<BatchNormParamType<T>>(saved_variance);
// Note, Input('Mean')/Input('Variance') share variable with
// Output('MeanOut')/Output('VarianceOut')
hipLaunchKernelGGL(( KeSyncAndMovingStats<T>)
, dim3((C + block - 1) / block), dim3(block), 0, stream, stats,
stats + C,
stats + 2 * C,
C,
momentum,
epsilon,
sv_mean_data,
sv_inv_var_data,
est_mean_data,
est_var_data);
mean_data = sv_mean_data;
var_data = stats + C;
}
int grid2 = (::min(x_numel, max_threads) + block - 1) / block;
if (layout == phi::DataLayout::kNCHW) {
hipLaunchKernelGGL(( KeNormAffine<T, phi::DataLayout::kNCHW>)
, dim3(grid2), dim3(block), 0, stream, x_d,
s_d,
b_d,
mean_data,
var_data,
epsilon,
C,
H * W * D,
x_numel,
y_d);
} else {
hipLaunchKernelGGL(( KeNormAffine<T, phi::DataLayout::kNHWC>)
, dim3(grid2), dim3(block), 0, stream, x_d,
s_d,
b_d,
mean_data,
var_data,
epsilon,
C,
H * W * D,
x_numel,
y_d);
}
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(sync_batch_norm,
GPU,
ALL_LAYOUT,
phi::SyncBatchNormKernel,
float,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->InputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
}
}
#else
PD_REGISTER_KERNEL(sync_batch_norm,
GPU,
ALL_LAYOUT,
phi::SyncBatchNormKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->InputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
}
}
#endif
|
9e7fb773729a13cc4161eff281bb7743c4f8e858.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/sync_batch_norm_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/sync_batch_norm_utils.h"
namespace phi {
template <typename T, typename Context>
void SyncBatchNormKernel(const Context &ctx,
const DenseTensor &x,
const DenseTensor &mean,
const DenseTensor &variance,
const DenseTensor &scale,
const DenseTensor &bias,
bool is_test,
float momentum,
float epsilon_f,
const std::string &data_layout_str,
bool use_global_stats,
bool trainable_statistics,
DenseTensor *y,
DenseTensor *mean_out,
DenseTensor *variance_out,
DenseTensor *saved_mean,
DenseTensor *saved_variance,
DenseTensor *reserve_space) {
PADDLE_ENFORCE_EQ(use_global_stats,
false,
phi::errors::InvalidArgument(
"sync_batch_norm doesn't support "
"to set use_global_stats True. Please use batch_norm "
"in this case."));
double epsilon = epsilon_f;
const bool trainable_stats = trainable_statistics;
const DataLayout layout = phi::StringToDataLayout(data_layout_str);
bool test_mode = is_test && (!trainable_statistics);
const auto &x_dims = x.dims();
PADDLE_ENFORCE_GE(x_dims.size(),
2,
phi::errors::InvalidArgument(
"The Input dim size should be larger than 1."));
PADDLE_ENFORCE_LE(x_dims.size(),
5,
phi::errors::InvalidArgument(
"The Input dim size should be less than 6."));
int N, C, H, W, D;
funcs::ExtractNCWHD(x_dims, layout, &N, &C, &H, &W, &D);
int x_numel = x.numel();
const T *x_d = x.template data<T>();
const auto *s_d = scale.template data<BatchNormParamType<T>>();
const auto *b_d = bias.template data<BatchNormParamType<T>>();
T *y_d = ctx.template Alloc<T>(y);
const BatchNormParamType<T> *mean_data = nullptr;
const BatchNormParamType<T> *var_data = nullptr;
auto stream = ctx.stream();
const int block = 512;
int max_threads = ctx.GetMaxPhysicalThreadCount();
paddle::memory::AllocationPtr alloc_ptr{nullptr};
if (test_mode) {
mean_data = mean.template data<BatchNormParamType<T>>();
var_data = variance.template data<BatchNormParamType<T>>();
} else {
// x, x^2, 1, here 1 is used to calc device num
// device num also can be got from platform::DeviceContextPool
const int bytes = (C * 2 + 1) * sizeof(BatchNormParamType<T>);
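    // Buffer layout: [0, C) per-channel statistics of x, [C, 2C) of x^2, plus one
    // trailing entry set to 1 on each device so the all-reduce below also yields the
    // number of participating devices.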
alloc_ptr = phi::memory_utils::Alloc(
ctx.GetPlace(),
bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(ctx.stream())));
auto *stats = reinterpret_cast<BatchNormParamType<T> *>(alloc_ptr->ptr());
const int threads = 256;
int grid = std::min(C, (max_threads + threads - 1) / threads);
if (layout == phi::DataLayout::kNCHW) {
KeLocalStats<T, threads, phi::DataLayout::kNCHW>
<<<grid, threads, 0, stream>>>(x_d, N, H * W * D, C, stats);
} else {
KeLocalStats<T, threads, phi::DataLayout::kNHWC>
<<<grid, threads, 0, stream>>>(x_d, N, H * W * D, C, stats);
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
ncclComm_t comm = static_cast<ncclComm_t>(detail::GetCCLComm(x.place(), 0));
if (comm == nullptr) {
comm = ctx.nccl_comm();
}
if (comm) {
int dtype = paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(mean_out->dtype()));
// In-place operation
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
stats,
stats,
2 * C + 1,
static_cast<ncclDataType_t>(dtype),
ncclSum,
comm,
stream));
VLOG(3) << "Sync result using all reduce";
}
#endif
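    // When built with NCCL/RCCL the in-place all-reduce above has summed the 2*C+1
    // local statistics, so every device now derives identical global mean and
    // inverse variance in KeSyncAndMovingStats and updates the moving statistics.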
auto *est_mean_data = ctx.template Alloc<BatchNormParamType<T>>(mean_out);
auto *est_var_data =
ctx.template Alloc<BatchNormParamType<T>>(variance_out);
auto *sv_mean_data = ctx.template Alloc<BatchNormParamType<T>>(saved_mean);
auto *sv_inv_var_data =
ctx.template Alloc<BatchNormParamType<T>>(saved_variance);
// Note, Input('Mean')/Input('Variance') share variable with
// Output('MeanOut')/Output('VarianceOut')
KeSyncAndMovingStats<T>
<<<(C + block - 1) / block, block, 0, stream>>>(stats,
stats + C,
stats + 2 * C,
C,
momentum,
epsilon,
sv_mean_data,
sv_inv_var_data,
est_mean_data,
est_var_data);
mean_data = sv_mean_data;
var_data = stats + C;
}
int grid2 = (std::min(x_numel, max_threads) + block - 1) / block;
if (layout == phi::DataLayout::kNCHW) {
KeNormAffine<T, phi::DataLayout::kNCHW>
<<<grid2, block, 0, stream>>>(x_d,
s_d,
b_d,
mean_data,
var_data,
epsilon,
C,
H * W * D,
x_numel,
y_d);
} else {
KeNormAffine<T, phi::DataLayout::kNHWC>
<<<grid2, block, 0, stream>>>(x_d,
s_d,
b_d,
mean_data,
var_data,
epsilon,
C,
H * W * D,
x_numel,
y_d);
}
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(sync_batch_norm,
GPU,
ALL_LAYOUT,
phi::SyncBatchNormKernel,
float,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->InputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
}
}
#else
PD_REGISTER_KERNEL(sync_batch_norm,
GPU,
ALL_LAYOUT,
phi::SyncBatchNormKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->InputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->InputAt(4).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(3).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(4).SetDataType(phi::DataType::FLOAT32);
}
}
#endif
|
38e442ad39e838cbd8d670655c36aba02ca4780f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MyCudaUtils.h"
#include "Kernels.h"
#include "Wrappers.h"
__device__ float camPams[CAMPAMSZ];
__device__ int imSizes[IMPAMSZ];
__device__ int pt_matches;
__device__ float imStats[64 * 3];
__device__ unsigned int num_correp[4];
#define TOTAL_IDX 0
#define FOUND_IDX 1
#define FAILED_DIST_IDX 2
#define FAILED_ANGLE_IDX 3
// Project a cloud to 3D
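// Back-projects both depth images (depth in millimetres) into metric camera-space
// clouds using the depth intrinsics: Z = depth*0.001, X = (col-cx)*Z/fx, Y = (row-cy)*Z/fy.
// Invalid (zero) depth readings are marked with a negative Z.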
__global__ void ProjectKernel(unsigned short* dimage_new, unsigned short* dimage_old, float* cloud_new, float* cloud_old, int num_pts) {
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= num_pts)
return;
int col = pt_idx % imSizes[DNX];
int row = roundf((pt_idx - col) / imSizes[DNX]);
unsigned short depth_new = dimage_new[pt_idx];
unsigned short depth_old = dimage_old[pt_idx];
// Print statement for debugging to check depth being accessed correctly
//printf("Row, col: %d,%d Depth_new: %d Depth_old: %d\n", row, col, depth_new, depth_old);
// Project the point in the new depth image
if (depth_new > 0) {
cloud_new[pt_idx * 3 + 2] = depth_new * 0.001f;
cloud_new[pt_idx * 3 + 0] = static_cast<float> (col - camPams[DCX]) * cloud_new[pt_idx * 3 + 2] * (1 / camPams[DFX]);
cloud_new[pt_idx * 3 + 1] = static_cast<float> (row - camPams[DCY]) * cloud_new[pt_idx * 3 + 2] * (1 / camPams[DFY]);
//printf("New Cloud: (row,col) (x,y,z): (%d, %d) (%f,%f,%f)\n", row, col, cloud_new[pt_idx * 3 + 0], cloud_new[pt_idx * 3 + 1], cloud_new[pt_idx * 3 + 2]);
}
else {
cloud_new[pt_idx * 3 + 2] = -1.0f; // Negative z coordinate to show invalid
cloud_new[pt_idx * 3 + 0] = 0.0f;
cloud_new[pt_idx * 3 + 1] = 0.0f;
}
// Print statement for debugging to check point projection is correct
//printf("New Cloud: (row,col) (x,y,z): (%d, %d) (%f,%f,%f)\n", row, col, cloud_new[pt_idx * 3 + 0], cloud_new[pt_idx * 3 + 1], cloud_new[pt_idx * 3 + 2]);
// Project the point in the old depth image
if (depth_old > 0){
cloud_old[pt_idx * 3 + 2] = depth_old * 0.001f;
cloud_old[pt_idx * 3 + 0] = static_cast<float> (col - camPams[DCX]) * cloud_old[pt_idx * 3 + 2] * (1 / camPams[DFX]);
cloud_old[pt_idx * 3 + 1] = static_cast<float> (row - camPams[DCY]) * cloud_old[pt_idx * 3 + 2] * (1 / camPams[DFY]);
}
else {
cloud_old[pt_idx * 3 + 2] = -1.0f; // Negative z coordinate to show invalid
cloud_old[pt_idx * 3 + 0] = 0.0f;
cloud_old[pt_idx * 3 + 1] = 0.0f;
}
}
__device__ inline void VecMinus(float vecA[3], float vecB[3], float vec_out[3]) {
vec_out[0] = vecA[0] - vecB[0];
vec_out[1] = vecA[1] - vecB[1];
vec_out[2] = vecA[2] - vecB[2];
}
__device__ inline void VecCross(float vecA[3], float vecB[3], float vec_out[3]) {
vec_out[0] = vecA[1] * vecB[2] - vecA[2] * vecB[1];
vec_out[1] = vecA[2] * vecB[0] - vecA[0] * vecB[2];
vec_out[2] = vecA[0] * vecB[1] - vecA[1] * vecB[0];
}
__device__ inline float VecNorm(float vec[3]) {
return sqrtf(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]);
}
__device__ inline void VecNormalise(float vec_in[3], float vec_out[3]) {
float norm = VecNorm(vec_in);
vec_out[0] = vec_in[0] / norm;
vec_out[1] = vec_in[1] / norm;
vec_out[2] = vec_in[2] / norm;
}
__device__ inline float VecDot(float vecA[3], float vecB[3]) {
return vecA[0] * vecB[0] + vecA[1] * vecB[1] + vecA[2] * vecB[2];
}
// Cloud is required to be stored row major order of points i.e. for Pt_Row,Col = (x,y,z); cloud = [Pt_0,0, Pt_0,1, Pt_0,2,...Pt_0,n, Pt_1,,
__global__ void ComputeNormalsKernel(float* cloud, float* normals, int num_pts) {
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= num_pts)
return;
int num_cols = imSizes[DNX];
int num_rows = imSizes[DNY];
int col = pt_idx % imSizes[DNX];
int row = roundf((pt_idx - col) / imSizes[DNX]);
float pt[3] = { 0.0 };
float pt_1col[3] = { 0.0 }; // Offset from pt by one column
float pt_1row[3] = { 0.0 }; // Offset from pt by one row
pt[0] = cloud[pt_idx * 3 + 0];
pt[1] = cloud[pt_idx * 3 + 1];
pt[2] = cloud[pt_idx * 3 + 2];
bool normError = false;
// Check whether we are at the border
if ((col == (num_cols - 1)) || (row == (num_rows - 1)))
normError = true;
if (!normError) {
pt_1col[0] = cloud[(pt_idx + 1) * 3 + 0];
pt_1col[1] = cloud[(pt_idx + 1) * 3 + 1];
pt_1col[2] = cloud[(pt_idx + 1) * 3 + 2];
pt_1row[0] = cloud[(pt_idx + num_cols) * 3 + 0];
pt_1row[1] = cloud[(pt_idx + num_cols) * 3 + 1];
pt_1row[2] = cloud[(pt_idx + num_cols) * 3 + 2];
}
// Check the three have valid depth readings
if (pt[2] < 0 || pt_1col[2] < 0 || pt_1row[2] < 0)
normError = true;
// Compute normal through local vector cross product
if (!normError) {
float vecRow[3], vecCol[3], normal[3], norm_normal[3];
VecMinus(pt_1col, pt, vecCol);
VecMinus(pt_1row, pt, vecRow);
VecCross(vecCol, vecRow, normal);
VecNormalise(normal, norm_normal);
normals[pt_idx * 3 + 0] = norm_normal[0];
normals[pt_idx * 3 + 1] = norm_normal[1];
normals[pt_idx * 3 + 2] = norm_normal[2];
}
// Set normal to greater than unit magnitude to show error computing normal
if (normError) {
normals[pt_idx * 3 + 0] = 2.0f;
normals[pt_idx * 3 + 1] = 2.0f;
normals[pt_idx * 3 + 2] = 2.0f;
}
}
// Finds a correspondence in the new cloud for every point in the old cloud
__global__ void ProjectiveAssociationKernel(float* trans_cloud_old, float* trans_normals_old, float* cloud_new, float* normals_new, int* correp_mask, int num_pts) {
int pt_old_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_old_idx >= num_pts)
return;
int num_cols = imSizes[DNX];
int num_rows = imSizes[DNY];
float old_point[3], new_point[3], old_normal[3], new_normal[3];
old_point[0] = trans_cloud_old[pt_old_idx * 3 + 0];
old_point[1] = trans_cloud_old[pt_old_idx * 3 + 1];
old_point[2] = trans_cloud_old[pt_old_idx * 3 + 2];
old_normal[0] = trans_normals_old[pt_old_idx * 3 + 0];
old_normal[1] = trans_normals_old[pt_old_idx * 3 + 1];
old_normal[2] = trans_normals_old[pt_old_idx * 3 + 2];
bool correp_valid = true;
// Check old point has valid depth and old normal is valid unit vector
if (old_point[2] < 0 || old_normal[0] > 1.0)
correp_valid = false;
// Find where the transformed old 3D point projects to in new frame
int col_proj_new, row_proj_new;
if (correp_valid) {
float invZ = 1.0 / old_point[2];
col_proj_new = roundf((old_point[0] * camPams[DFX] * invZ) + camPams[DCX]);
row_proj_new = roundf((old_point[1] * camPams[DFY] * invZ) + camPams[DCY]);
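		// Standard pinhole projection of the transformed point into the new depth
		// image: u = fx*X/Z + cx, v = fy*Y/Z + cy, rounded to the nearest pixel.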
// Check reprojection falls into valid image coordinates
if (col_proj_new < 0 || row_proj_new < 0 || col_proj_new >= num_cols || row_proj_new >= num_rows)
correp_valid = false;
}
// Get the new point and normal
int pt_new_idx;
if (correp_valid){
pt_new_idx = col_proj_new + row_proj_new*num_cols;
new_point[0] = cloud_new[pt_new_idx * 3 + 0];
new_point[1] = cloud_new[pt_new_idx * 3 + 1];
new_point[2] = cloud_new[pt_new_idx * 3 + 2];
new_normal[0] = normals_new[pt_new_idx * 3 + 0];
new_normal[1] = normals_new[pt_new_idx * 3 + 1];
new_normal[2] = normals_new[pt_new_idx * 3 + 2];
// Check the new point and normal is valid
if (new_point[2] < 0 || new_normal[0] > 1.0)
correp_valid = false;
else
atomicAdd(&num_correp[FOUND_IDX], 1);
}
	// Check for a valid correspondence using Euclidean distance and angle thresholds
if (correp_valid) {
float distVec[3];
VecMinus(new_point, old_point, distVec);
float dist = VecNorm(distVec);
float angle = fabsf(VecDot(new_normal, old_normal));
if (dist > 0.1) {
correp_valid = false;
atomicAdd(&num_correp[FAILED_DIST_IDX], 1);
//if (pt_old_idx == 102650 || pt_old_idx == 102660) {
// printf("Old Pt %d trans to: (%.3f, %.3f, %.3f), projects to: (%d, %d) with correp new point (%.3f, %.3f, %.3f) and dist %f\n", pt_old_idx, old_point[0], old_point[1], old_point[2], col_proj_new, row_proj_new,
// new_point[0], new_point[1], new_point[2], dist);
//}
// Print statements for debugging to check projective association validity
}
//else {
// printf("Successful correspondance");
//}
}
	// Mark whether a valid correspondence was found
if (correp_valid) {
correp_mask[pt_old_idx] = pt_new_idx;
atomicAdd(&num_correp[TOTAL_IDX], 1);
}
else {
correp_mask[pt_old_idx] = -1;
}
}
__global__ void ComputeErrorFunc(float* source_old, float *dest_new, float *normals_new, int* matches_mask, float* A_mat, float* B_mat) {
int col_idx = threadIdx.x + blockIdx.x * blockDim.x;
int num_cols = imSizes[DNX];
int num_rows = imSizes[DNY];
int src_pt_idx = 0, dest_pt_idx = 0;
float src_pt[3], dest_pt[3], norm[3], cross[3], diff[3], dot;
float BMat_sum[6] = { 0.0 };
float AMat_sum[36] = { 0.0 };
int matches_this_col = 0;
for (int row = 0; row < num_rows; row++) {
src_pt_idx = col_idx + row * num_cols;
dest_pt_idx = matches_mask[src_pt_idx];
if (dest_pt_idx == -1)
continue;
matches_this_col++;
src_pt[0] = source_old[src_pt_idx * 3 + 0];
src_pt[1] = source_old[src_pt_idx * 3 + 1];
src_pt[2] = source_old[src_pt_idx * 3 + 2];
dest_pt[0] = dest_new[dest_pt_idx * 3 + 0];
dest_pt[1] = dest_new[dest_pt_idx * 3 + 1];
dest_pt[2] = dest_new[dest_pt_idx * 3 + 2];
norm[0] = normals_new[dest_pt_idx * 3 + 0];
norm[1] = normals_new[dest_pt_idx * 3 + 1];
norm[2] = normals_new[dest_pt_idx * 3 + 2];
VecCross(src_pt, norm, cross);
VecMinus(src_pt, dest_pt, diff);
dot = VecDot(diff, norm);
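		// Point-to-plane ICP: with the twist xi = [w; t] the linearised residual of a
		// match is w.(p x n) + t.n + (p - q).n, so each match contributes the row
		// J = [p x n, n] to A += J*J^T (6x6, column-major) and B += -J*((p - q).n).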
// Add to the B matrix
for (int i = 0; i < 3; i++) {
BMat_sum[i] -= cross[i] * dot;
BMat_sum[i + 3] -= norm[i] * dot;
}
// Storing column-major
for (int i = 0; i < 3; i++) {
float multTop = cross[i];
float multBot = norm[i];
for (int j = 0; j < 3; j++) {
AMat_sum[i + 6 * j] += multTop*cross[j];
AMat_sum[i + 6 * (j + 3)] += multTop*norm[j];
AMat_sum[i + 3 + 6 * j] += multBot*cross[j];
AMat_sum[i + 3 + 6 * (j + 3)] += multBot*norm[j];
}
}
// Print statements for debugging to check the computation of the error function
//if (col_idx == 200 && (matches_this_col == 170 || matches_this_col == 171)) {
// printf("Src_pt: %f, %f, %f\n", src_pt[0], src_pt[1], src_pt[2]);
// printf("Dest_pt: %f, %f, %f\n", dest_pt[0], dest_pt[1], dest_pt[2]);
// printf("Norm: %f, %f, %f\n", norm[0], norm[1], norm[2]);
// printf("Cross: %f, %f, %f\n", cross[0], cross[1], cross[2]);
// printf("Dot: %f\n", dot);
// printf("BMat: \n");
// for (int i = 0; i < 6; i++) {
// printf("%f\n", BMat_sum[i]);
// }
// printf("\nAmat:\n");
// for (int i = 0; i < 6; i++) {
// for (int j = 0; j < 6; j++) {
// printf("%f, ", AMat_sum[j * 6 + i]);
// }
// printf("\n");
// }
// printf("\n");
//}
}
// Copy back to global array
for (int i = 0; i < 36; i++) {
A_mat[col_idx * 36 + i] = AMat_sum[i];
}
for (int i = 0; i < 6; i++) {
B_mat[col_idx * 6 + i] = BMat_sum[i];
}
}
void RfromT(float* T_in, float* R_out) {
for (int i = 0; i < 16; i++) {
R_out[i] = T_in[i];
}
R_out[12] = R_out[13] = R_out[14] = 0.0f;
}
void MatMult(float* M1, float* M2, float* M_out, int rows1, int cols1, int cols2) {
float sum = 0;
for (int c = 0; c < rows1; c++)
{
for (int d = 0; d < cols2; d++)
{
for (int k = 0; k < cols1; k++)
{
sum = sum + M1[k*cols1 + c] * M2[d*cols2 + k];
}
M_out[d*cols2 + c] = sum;
sum = 0;
}
}
}
// THIS FUNCTION IS HARD CODED. USE WITH CAUTION
__global__ void ComputeDepthImStats(unsigned short* dimage) {
int col_idx = threadIdx.x;
int start_pt = blockIdx.y * 8 * 512 + blockIdx.x * 64 + col_idx;
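	// Hard-coded indexing: assumes 512-pixel-wide depth rows. The launch uses an
	// 8x8 grid of 64-thread blocks; each thread walks 53 rows of one column,
	// accumulating the sum and valid-pixel count, then (second pass) the squared
	// deviation, giving per-tile mean/variance/valid-count in imStats.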
__shared__ float sumArray[64];
__shared__ float varArray[64];
__shared__ int validArray[64];
__shared__ float mean;
__shared__ int valid;
sumArray[col_idx] = 0.0;
varArray[col_idx] = 0.0;
validArray[col_idx] = 0;
for (int i = 0; i < 53; i++) {
unsigned short d = dimage[start_pt + 512 * i];
if (d > 0) {
sumArray[col_idx] += (float)dimage[start_pt + 512 * i];
validArray[col_idx]++;
}
}
__syncthreads();
if (col_idx == 0) {
float sum = 0;
valid = 0;
for (int i = 0; i < 64; i++) {
sum += sumArray[i];
valid += validArray[i];
}
if (valid == 0) mean = 0;
else mean = sum / (valid);
}
__syncthreads();
for (int i = 0; i < 53; i++) {
unsigned short d = dimage[start_pt + 512 * i];
if (d > 0) {
varArray[col_idx] += (d - mean)* (d - mean);
}
}
__syncthreads();
if (col_idx == 0) {
float var = 0;
for (int i = 0; i < 64; i++) var += varArray[i];
if (valid == 0) var = 0;
else var /= (valid);
imStats[blockIdx.x * 3 + blockIdx.y * 8 * 3 + 0] = mean;
imStats[blockIdx.x * 3 + blockIdx.y * 8 * 3 + 1] = var;
imStats[blockIdx.x * 3 + blockIdx.y * 8 * 3 + 2] = valid;
}
}
void MakeTransIdentity(float* T) {
for (int i = 0; i < 16; i++) { T[i] = 0; }
T[0] = T[5] = T[10] = T[15] = 1.0;
}
hipError_t MyCudaUtils::DenseICP(cv::Mat& dimage_new, cv::Mat& dimage_old, float* H_camPams, int* H_imSizes, float* T_hint, float depthStats[128]) {
assert(dimage_old.type() == CV_16U && dimage_new.type() == CV_16U);
assert(dimage_old.size == dimage_new.size);
int num_pts = dimage_new.rows * dimage_new.cols;
bool drawDepthMaps = false;
if (drawDepthMaps) DrawDepthMaps(dimage_new, dimage_old);
// Since cv::Mat is row major this actually performs the same job as column-major
// points-in-columns eigen matrix
cv::Mat normals_new(num_pts, 3, CV_32F);
cv::Mat cloud_new(num_pts, 3, CV_32F);
cv::Mat cloud_old(num_pts, 3, CV_32F);
cv::Mat mapped_cloud_old(num_pts, 3, CV_32F);
cv::Mat correp_mask(dimage_new.rows, dimage_new.cols, CV_32S);
// Device memory pointers
float *D_cloud_new, *D_cloud_old;
unsigned short *D_dimage_new, *D_dimage_old;
float *D_normals_new, *D_normals_old;
float *D_trans, *D_rot;
float *D_mapped_cloud_old, *D_mapped_normals_old;
int *D_correp_mask;
float *D_A_mat, *D_B_mat;
// Output transform
float T_curr[16] = { 0.0 };
bool drawTransCloud = false;
bool drawTransNormals = false;
bool drawInitProjection = false;
bool drawInitNormals = false;
bool drawCorrep = false;
try {
// Allocate for the clouds
CUDA_MALLOC(D_cloud_new, num_pts * 3 * sizeof(float));
CUDA_MALLOC(D_cloud_old, num_pts * 3 * sizeof(float));
// For the correspondance mask
CUDA_MALLOC(D_correp_mask, num_pts * sizeof(int));
// Copy across the depth images
CUDA_INIT_MEM(D_dimage_new, dimage_new.data, num_pts * sizeof(unsigned short));
CUDA_INIT_MEM(D_dimage_old, dimage_old.data, num_pts * sizeof(unsigned short));
// Allocate for the normal map
CUDA_MALLOC(D_normals_new, num_pts * 3 * sizeof(float));
CUDA_MALLOC(D_normals_old, num_pts * 3 * sizeof(float));
// For old cloud and normals transformed into estimated new camera space
CUDA_MALLOC(D_mapped_cloud_old, num_pts * 3 * sizeof(float));
CUDA_MALLOC(D_mapped_normals_old, num_pts * 3 * sizeof(float));
// Camera parameters for projection
CUDA_CHECK(hipMemcpyToSymbol(camPams, H_camPams, CAMPAMSZ * sizeof(float)));
CUDA_CHECK(hipMemcpyToSymbol(imSizes, H_imSizes, IMPAMSZ * sizeof(int)));
// A and B matrices summed across each column by a single GPU thread
CUDA_MALLOC(D_A_mat, dimage_new.cols * 36 * sizeof(float));
CUDA_MALLOC(D_B_mat, dimage_new.cols * 6 * sizeof(float));
// Perform the projection of both clouds
int blocks = cvCeil(num_pts / 128.0f);
ProjectKernel << < blocks, 128 >> >(D_dimage_new, D_dimage_old, D_cloud_new, D_cloud_old, num_pts);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
// For debugging - check the projection of the dense clouds
if (drawInitProjection) {
CUDA_DOWNLOAD(cloud_new.data, D_cloud_new, num_pts*sizeof(float)* 3);
CUDA_DOWNLOAD(cloud_old.data, D_cloud_old, num_pts*sizeof(float)* 3);
DrawClouds((float *)cloud_new.data, num_pts, (float*)cloud_old.data, 0.8, 3.0);
}
// Compute the normals
ComputeNormalsKernel << < blocks, 128 >> >(D_cloud_new, D_normals_new, num_pts);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
ComputeNormalsKernel << < blocks, 128 >> >(D_cloud_old, D_normals_old, num_pts);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
// For debugging, check the computation of the normals
if (drawInitNormals) {
CUDA_DOWNLOAD(normals_new.data, D_normals_new, num_pts * 3 * sizeof(float));
DrawNormals((float*) cloud_new.data, num_pts, (float*) normals_new.data, 0.08, 0.26);
}
// Compute statistics for the depth image. For an equally sized 8x8 grid
// compute the mean and std_dev of the depth pixels
ComputeDepthImStats << <dim3(8, 8, 1), 64 >> >(D_dimage_new);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
CUDA_CHECK(hipMemcpyFromSymbol(depthStats, imStats, 192 * sizeof(float)));
// Perform the initial transformation of old points and normals to new frame
//T_hint[12] = -0.02;
//T_hint[13] = -0.02;
//T_hint[14] = 0.02;
CUDA_INIT_MEM(D_trans, T_hint, 16 * sizeof(float));
float R[16];
RfromT(T_hint, R);
CUDA_INIT_MEM(D_rot, R, 16 * sizeof(float));
float deltaX = 100;
float stateVec[6] = { 10.0 };
for (int i = 0; i< 16; i++) T_curr[i] = T_hint[i];
int iter = 0;
char file[256];
// Begin the loop of iterating to completion
int maxIter = 30;
bool failed_hint = false;
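		// Main ICP loop: transform the old cloud and normals by the current estimate,
		// find correspondences by projective association, accumulate the point-to-plane
		// normal equations, solve for a small incremental transform and compound it
		// into T_curr until the update is negligible or maxIter is reached.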
while (deltaX > 1e-8 && iter++ < maxIter) {
// Transform old points and normals by current estimate
hipLaunchKernelGGL(( TransformKernel), dim3(blocks), dim3(128) , 0, 0, D_cloud_old, D_trans, D_mapped_cloud_old, num_pts);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( TransformKernel) , dim3(blocks), dim3(128) , 0, 0, D_normals_old, D_rot, D_mapped_normals_old, num_pts);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
// For debugging, draw transformed old cloud and the two on top of each other
if (drawTransCloud) {
CUDA_DOWNLOAD(mapped_cloud_old.data, D_mapped_cloud_old, num_pts*sizeof(float)* 3);
CUDA_DOWNLOAD(cloud_new.data, D_cloud_new, num_pts *sizeof(float)* 3);
sprintf_s(file, "Iter%d.png", iter-1);
DrawClouds((float*)cloud_new.data, num_pts, (float*)mapped_cloud_old.data, 0.8, 3.0, file);
// For saving screenshots
//sprintf_s(file, "Iter%d.png", iter++);
}
// For debugging, draws the transformed normals
if (drawTransNormals) {
CUDA_DOWNLOAD(normals_new.data, D_mapped_normals_old, num_pts * 3 * sizeof(float));
DrawNormals((float*)cloud_new.data, num_pts, (float*)normals_new.data, 0.08, 0.26);
}
// Get the correspondance map through projective data assocation
int host_num_correp[4] = { 0 };
CUDA_CHECK(hipMemcpyToSymbol(num_correp, &host_num_correp, 4*sizeof(int)));
ProjectiveAssociationKernel << <blocks, 128 >> >(D_mapped_cloud_old, D_mapped_normals_old, D_cloud_new, D_normals_new, D_correp_mask, num_pts);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
CUDA_CHECK(hipMemcpyFromSymbol( &host_num_correp, num_correp, 4*sizeof(int)));
if (drawCorrep ) {
CUDA_DOWNLOAD(mapped_cloud_old.data, D_mapped_cloud_old, num_pts*sizeof(float)* 3);
CUDA_DOWNLOAD(cloud_new.data, D_cloud_new, num_pts *sizeof(float)* 3);
CUDA_DOWNLOAD(cloud_old.data, D_cloud_old, num_pts*sizeof(float)* 3);
CUDA_DOWNLOAD(correp_mask.data, D_correp_mask, num_pts * sizeof(int));
DrawCorrep((float*)mapped_cloud_old.data, (float*)cloud_new.data, num_pts, (int*)correp_mask.data);
}
// Hint might be bad, try restarting with identity
if (host_num_correp[0] < 100 && failed_hint == false) {
failed_hint = true;
MakeTransIdentity(T_curr);
CUDA_UPLOAD(D_trans, T_curr, 16 * sizeof(float));
float R[16];
			RfromT(T_curr, R);   // rotation-only part of the reset estimate, applied to the normals
CUDA_UPLOAD(D_rot, R, 16 * sizeof(float));
continue;
}
// Hint and identity failed, ICP has failed and return identity
else if (host_num_correp[0] < 100 && failed_hint == true) {
MakeTransIdentity(T_curr);
break;
}
//QueryPoint( correp_mask);
// Compute the Ax - b = 0 error function
ComputeErrorFunc << <16, 32 >> >(D_mapped_cloud_old, D_cloud_new, D_normals_new, D_correp_mask, D_A_mat, D_B_mat);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
float* H_A = new float[dimage_new.cols * 36];
float* H_B = new float[dimage_new.cols * 6];
CUDA_DOWNLOAD(H_A, D_A_mat, dimage_new.cols * 36 * sizeof(float));
CUDA_DOWNLOAD(H_B, D_B_mat, dimage_new.cols * 6 * sizeof(float));
// Solve
float newStateVec[6];
FindMinimisingTransform(H_A, H_B, dimage_new.cols, newStateVec);
delete[] H_A;
delete[] H_B;
// Compute amount of change in x - break if small
deltaX = 0.0;
for (int i = 0; i < 6; i++)
				deltaX += (newStateVec[i])*(newStateVec[i]);
for (int i = 0; i < 6; i++) stateVec[i] = newStateVec[i];
// Compute equivalent transform and compound with existing estimate
float alp = stateVec[0], bet = stateVec[1], gam = stateVec[2];
float tx = stateVec[3], ty = stateVec[4], tz = stateVec[5];
float T[16] = { 0.0 };
T[0] = cos(gam)*cos(bet);
T[1] = sin(gam)*cos(bet);
T[2] = -sin(bet);
T[4] = -sin(gam)*cos(alp) + cos(gam)*sin(bet)*sin(alp);
T[5] = cos(gam)*cos(alp) + sin(gam)*sin(bet)*sin(alp);
T[6] = cos(bet)*sin(alp);
T[8] = sin(gam)*sin(alp) + cos(gam)*sin(bet)*cos(alp);
T[9] = -cos(gam)*sin(alp) + sin(gam)*sin(bet)*cos(alp);
T[10] = cos(bet)*cos(alp);
T[12] = tx; T[13] = ty; T[14] = tz;
T[15] = 1.0;
float T_temp[16];
MatMult(T_curr, T, T_temp, 4, 4, 4);
for (int i = 0; i < 16; i++) T_curr[i] = T_temp[i];
CUDA_UPLOAD(D_trans, T_curr, 16 * sizeof(float));
float R[16];
			RfromT(T_curr, R);   // rotation-only part of the updated estimate, applied to the normals
CUDA_UPLOAD(D_rot, R, 16 * sizeof(float));
}
// Did not converge, return Identity
if (iter == maxIter) {
for (int i = 0; i < 16; i++) { T_curr[i] = 0; }
T_curr[0] = T_curr[5] = T_curr[10] = T_curr[15] = 1.0;
}
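		// Normal exit: throw hipSuccess so the catch block below releases the device
		// buffers and copies the final transform back into T_hint.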
throw(hipSuccess);
}
catch (hipError_t cudaStatus) {
hipFree(D_cloud_new);
hipFree(D_cloud_old);
hipFree(D_dimage_new);
hipFree(D_dimage_old);
hipFree(D_normals_new);
hipFree(D_normals_old);
hipFree(D_mapped_cloud_old);
hipFree(D_mapped_normals_old);
hipFree(D_correp_mask);
hipFree(D_trans);
hipFree(D_rot);
hipFree(D_A_mat);
hipFree(D_B_mat);
for (int i = 0; i < 16; i++) T_hint[i] = T_curr[i];
return cudaStatus;
}
	// Create normal map for the new depth image
	// Associate points by projective data association
}
|
38e442ad39e838cbd8d670655c36aba02ca4780f.cu
|
#include "MyCudaUtils.h"
#include "Kernels.h"
#include "Wrappers.h"
__device__ float camPams[CAMPAMSZ];
__device__ int imSizes[IMPAMSZ];
__device__ int pt_matches;
__device__ float imStats[64 * 3];
__device__ unsigned int num_correp[4];
#define TOTAL_IDX 0
#define FOUND_IDX 1
#define FAILED_DIST_IDX 2
#define FAILED_ANGLE_IDX 3
// Project a cloud to 3D
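// Back-projects both depth images (depth in millimetres) into metric camera-space
// clouds using the depth intrinsics: Z = depth*0.001, X = (col-cx)*Z/fx, Y = (row-cy)*Z/fy.
// Invalid (zero) depth readings are marked with a negative Z.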
__global__ void ProjectKernel(unsigned short* dimage_new, unsigned short* dimage_old, float* cloud_new, float* cloud_old, int num_pts) {
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= num_pts)
return;
int col = pt_idx % imSizes[DNX];
int row = roundf((pt_idx - col) / imSizes[DNX]);
unsigned short depth_new = dimage_new[pt_idx];
unsigned short depth_old = dimage_old[pt_idx];
// Print statement for debugging to check depth being accessed correctly
//printf("Row, col: %d,%d Depth_new: %d Depth_old: %d\n", row, col, depth_new, depth_old);
// Project the point in the new depth image
if (depth_new > 0) {
cloud_new[pt_idx * 3 + 2] = depth_new * 0.001f;
cloud_new[pt_idx * 3 + 0] = static_cast<float> (col - camPams[DCX]) * cloud_new[pt_idx * 3 + 2] * (1 / camPams[DFX]);
cloud_new[pt_idx * 3 + 1] = static_cast<float> (row - camPams[DCY]) * cloud_new[pt_idx * 3 + 2] * (1 / camPams[DFY]);
//printf("New Cloud: (row,col) (x,y,z): (%d, %d) (%f,%f,%f)\n", row, col, cloud_new[pt_idx * 3 + 0], cloud_new[pt_idx * 3 + 1], cloud_new[pt_idx * 3 + 2]);
}
else {
cloud_new[pt_idx * 3 + 2] = -1.0f; // Negative z coordinate to show invalid
cloud_new[pt_idx * 3 + 0] = 0.0f;
cloud_new[pt_idx * 3 + 1] = 0.0f;
}
// Print statement for debugging to check point projection is correct
//printf("New Cloud: (row,col) (x,y,z): (%d, %d) (%f,%f,%f)\n", row, col, cloud_new[pt_idx * 3 + 0], cloud_new[pt_idx * 3 + 1], cloud_new[pt_idx * 3 + 2]);
// Project the point in the old depth image
if (depth_old > 0){
cloud_old[pt_idx * 3 + 2] = depth_old * 0.001f;
cloud_old[pt_idx * 3 + 0] = static_cast<float> (col - camPams[DCX]) * cloud_old[pt_idx * 3 + 2] * (1 / camPams[DFX]);
cloud_old[pt_idx * 3 + 1] = static_cast<float> (row - camPams[DCY]) * cloud_old[pt_idx * 3 + 2] * (1 / camPams[DFY]);
}
else {
cloud_old[pt_idx * 3 + 2] = -1.0f; // Negative z coordinate to show invalid
cloud_old[pt_idx * 3 + 0] = 0.0f;
cloud_old[pt_idx * 3 + 1] = 0.0f;
}
}
__device__ inline void VecMinus(float vecA[3], float vecB[3], float vec_out[3]) {
vec_out[0] = vecA[0] - vecB[0];
vec_out[1] = vecA[1] - vecB[1];
vec_out[2] = vecA[2] - vecB[2];
}
__device__ inline void VecCross(float vecA[3], float vecB[3], float vec_out[3]) {
vec_out[0] = vecA[1] * vecB[2] - vecA[2] * vecB[1];
vec_out[1] = vecA[2] * vecB[0] - vecA[0] * vecB[2];
vec_out[2] = vecA[0] * vecB[1] - vecA[1] * vecB[0];
}
__device__ inline float VecNorm(float vec[3]) {
return sqrtf(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]);
}
__device__ inline void VecNormalise(float vec_in[3], float vec_out[3]) {
float norm = VecNorm(vec_in);
vec_out[0] = vec_in[0] / norm;
vec_out[1] = vec_in[1] / norm;
vec_out[2] = vec_in[2] / norm;
}
__device__ inline float VecDot(float vecA[3], float vecB[3]) {
return vecA[0] * vecB[0] + vecA[1] * vecB[1] + vecA[2] * vecB[2];
}
// Cloud is required to be stored row major order of points i.e. for Pt_Row,Col = (x,y,z); cloud = [Pt_0,0, Pt_0,1, Pt_0,2,...Pt_0,n, Pt_1,,
__global__ void ComputeNormalsKernel(float* cloud, float* normals, int num_pts) {
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= num_pts)
return;
int num_cols = imSizes[DNX];
int num_rows = imSizes[DNY];
int col = pt_idx % imSizes[DNX];
int row = roundf((pt_idx - col) / imSizes[DNX]);
float pt[3] = { 0.0 };
float pt_1col[3] = { 0.0 }; // Offset from pt by one column
float pt_1row[3] = { 0.0 }; // Offset from pt by one row
pt[0] = cloud[pt_idx * 3 + 0];
pt[1] = cloud[pt_idx * 3 + 1];
pt[2] = cloud[pt_idx * 3 + 2];
bool normError = false;
// Check whether we are at the border
if ((col == (num_cols - 1)) || (row == (num_rows - 1)))
normError = true;
if (!normError) {
pt_1col[0] = cloud[(pt_idx + 1) * 3 + 0];
pt_1col[1] = cloud[(pt_idx + 1) * 3 + 1];
pt_1col[2] = cloud[(pt_idx + 1) * 3 + 2];
pt_1row[0] = cloud[(pt_idx + num_cols) * 3 + 0];
pt_1row[1] = cloud[(pt_idx + num_cols) * 3 + 1];
pt_1row[2] = cloud[(pt_idx + num_cols) * 3 + 2];
}
// Check the three have valid depth readings
if (pt[2] < 0 || pt_1col[2] < 0 || pt_1row[2] < 0)
normError = true;
// Compute normal through local vector cross product
if (!normError) {
float vecRow[3], vecCol[3], normal[3], norm_normal[3];
VecMinus(pt_1col, pt, vecCol);
VecMinus(pt_1row, pt, vecRow);
VecCross(vecCol, vecRow, normal);
VecNormalise(normal, norm_normal);
normals[pt_idx * 3 + 0] = norm_normal[0];
normals[pt_idx * 3 + 1] = norm_normal[1];
normals[pt_idx * 3 + 2] = norm_normal[2];
}
// Set normal to greater than unit magnitude to show error computing normal
if (normError) {
normals[pt_idx * 3 + 0] = 2.0f;
normals[pt_idx * 3 + 1] = 2.0f;
normals[pt_idx * 3 + 2] = 2.0f;
}
}
// Finds a correspondence in the new cloud for every point in the old cloud
__global__ void ProjectiveAssociationKernel(float* trans_cloud_old, float* trans_normals_old, float* cloud_new, float* normals_new, int* correp_mask, int num_pts) {
int pt_old_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_old_idx >= num_pts)
return;
int num_cols = imSizes[DNX];
int num_rows = imSizes[DNY];
float old_point[3], new_point[3], old_normal[3], new_normal[3];
old_point[0] = trans_cloud_old[pt_old_idx * 3 + 0];
old_point[1] = trans_cloud_old[pt_old_idx * 3 + 1];
old_point[2] = trans_cloud_old[pt_old_idx * 3 + 2];
old_normal[0] = trans_normals_old[pt_old_idx * 3 + 0];
old_normal[1] = trans_normals_old[pt_old_idx * 3 + 1];
old_normal[2] = trans_normals_old[pt_old_idx * 3 + 2];
bool correp_valid = true;
// Check old point has valid depth and old normal is valid unit vector
if (old_point[2] < 0 || old_normal[0] > 1.0)
correp_valid = false;
// Find where the transformed old 3D point projects to in new frame
int col_proj_new, row_proj_new;
if (correp_valid) {
float invZ = 1.0 / old_point[2];
col_proj_new = roundf((old_point[0] * camPams[DFX] * invZ) + camPams[DCX]);
row_proj_new = roundf((old_point[1] * camPams[DFY] * invZ) + camPams[DCY]);
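		// Standard pinhole projection of the transformed point into the new depth
		// image: u = fx*X/Z + cx, v = fy*Y/Z + cy, rounded to the nearest pixel.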
// Check reprojection falls into valid image coordinates
if (col_proj_new < 0 || row_proj_new < 0 || col_proj_new >= num_cols || row_proj_new >= num_rows)
correp_valid = false;
}
// Get the new point and normal
int pt_new_idx;
if (correp_valid){
pt_new_idx = col_proj_new + row_proj_new*num_cols;
new_point[0] = cloud_new[pt_new_idx * 3 + 0];
new_point[1] = cloud_new[pt_new_idx * 3 + 1];
new_point[2] = cloud_new[pt_new_idx * 3 + 2];
new_normal[0] = normals_new[pt_new_idx * 3 + 0];
new_normal[1] = normals_new[pt_new_idx * 3 + 1];
new_normal[2] = normals_new[pt_new_idx * 3 + 2];
// Check the new point and normal is valid
if (new_point[2] < 0 || new_normal[0] > 1.0)
correp_valid = false;
else
atomicAdd(&num_correp[FOUND_IDX], 1);
}
	// Check for a valid correspondence using Euclidean distance and angle thresholds
if (correp_valid) {
float distVec[3];
VecMinus(new_point, old_point, distVec);
float dist = VecNorm(distVec);
float angle = fabsf(VecDot(new_normal, old_normal));
if (dist > 0.1) {
correp_valid = false;
atomicAdd(&num_correp[FAILED_DIST_IDX], 1);
//if (pt_old_idx == 102650 || pt_old_idx == 102660) {
// printf("Old Pt %d trans to: (%.3f, %.3f, %.3f), projects to: (%d, %d) with correp new point (%.3f, %.3f, %.3f) and dist %f\n", pt_old_idx, old_point[0], old_point[1], old_point[2], col_proj_new, row_proj_new,
// new_point[0], new_point[1], new_point[2], dist);
//}
// Print statements for debugging to check projective association validity
}
//else {
// printf("Successful correspondance");
//}
}
	// Mark whether a valid correspondence was found
if (correp_valid) {
correp_mask[pt_old_idx] = pt_new_idx;
atomicAdd(&num_correp[TOTAL_IDX], 1);
}
else {
correp_mask[pt_old_idx] = -1;
}
}
__global__ void ComputeErrorFunc(float* source_old, float *dest_new, float *normals_new, int* matches_mask, float* A_mat, float* B_mat) {
int col_idx = threadIdx.x + blockIdx.x * blockDim.x;
int num_cols = imSizes[DNX];
int num_rows = imSizes[DNY];
int src_pt_idx = 0, dest_pt_idx = 0;
float src_pt[3], dest_pt[3], norm[3], cross[3], diff[3], dot;
float BMat_sum[6] = { 0.0 };
float AMat_sum[36] = { 0.0 };
int matches_this_col = 0;
for (int row = 0; row < num_rows; row++) {
src_pt_idx = col_idx + row * num_cols;
dest_pt_idx = matches_mask[src_pt_idx];
if (dest_pt_idx == -1)
continue;
matches_this_col++;
src_pt[0] = source_old[src_pt_idx * 3 + 0];
src_pt[1] = source_old[src_pt_idx * 3 + 1];
src_pt[2] = source_old[src_pt_idx * 3 + 2];
dest_pt[0] = dest_new[dest_pt_idx * 3 + 0];
dest_pt[1] = dest_new[dest_pt_idx * 3 + 1];
dest_pt[2] = dest_new[dest_pt_idx * 3 + 2];
norm[0] = normals_new[dest_pt_idx * 3 + 0];
norm[1] = normals_new[dest_pt_idx * 3 + 1];
norm[2] = normals_new[dest_pt_idx * 3 + 2];
VecCross(src_pt, norm, cross);
VecMinus(src_pt, dest_pt, diff);
dot = VecDot(diff, norm);
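		// Point-to-plane ICP: with the twist xi = [w; t] the linearised residual of a
		// match is w.(p x n) + t.n + (p - q).n, so each match contributes the row
		// J = [p x n, n] to A += J*J^T (6x6, column-major) and B += -J*((p - q).n).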
// Add to the B matrix
for (int i = 0; i < 3; i++) {
BMat_sum[i] -= cross[i] * dot;
BMat_sum[i + 3] -= norm[i] * dot;
}
// Storing column-major
for (int i = 0; i < 3; i++) {
float multTop = cross[i];
float multBot = norm[i];
for (int j = 0; j < 3; j++) {
AMat_sum[i + 6 * j] += multTop*cross[j];
AMat_sum[i + 6 * (j + 3)] += multTop*norm[j];
AMat_sum[i + 3 + 6 * j] += multBot*cross[j];
AMat_sum[i + 3 + 6 * (j + 3)] += multBot*norm[j];
}
}
// Print statements for debugging to check the computation of the error function
//if (col_idx == 200 && (matches_this_col == 170 || matches_this_col == 171)) {
// printf("Src_pt: %f, %f, %f\n", src_pt[0], src_pt[1], src_pt[2]);
// printf("Dest_pt: %f, %f, %f\n", dest_pt[0], dest_pt[1], dest_pt[2]);
// printf("Norm: %f, %f, %f\n", norm[0], norm[1], norm[2]);
// printf("Cross: %f, %f, %f\n", cross[0], cross[1], cross[2]);
// printf("Dot: %f\n", dot);
// printf("BMat: \n");
// for (int i = 0; i < 6; i++) {
// printf("%f\n", BMat_sum[i]);
// }
// printf("\nAmat:\n");
// for (int i = 0; i < 6; i++) {
// for (int j = 0; j < 6; j++) {
// printf("%f, ", AMat_sum[j * 6 + i]);
// }
// printf("\n");
// }
// printf("\n");
//}
}
// Copy back to global array
for (int i = 0; i < 36; i++) {
A_mat[col_idx * 36 + i] = AMat_sum[i];
}
for (int i = 0; i < 6; i++) {
B_mat[col_idx * 6 + i] = BMat_sum[i];
}
}
void RfromT(float* T_in, float* R_out) {
for (int i = 0; i < 16; i++) {
R_out[i] = T_in[i];
}
R_out[12] = R_out[13] = R_out[14] = 0.0f;
}
void MatMult(float* M1, float* M2, float* M_out, int rows1, int cols1, int cols2) {
float sum = 0;
for (int c = 0; c < rows1; c++)
{
for (int d = 0; d < cols2; d++)
{
for (int k = 0; k < cols1; k++)
{
sum = sum + M1[k*cols1 + c] * M2[d*cols2 + k];
}
M_out[d*cols2 + c] = sum;
sum = 0;
}
}
}
// THIS FUNCTION IS HARD CODED. USE WITH CAUTION
__global__ void ComputeDepthImStats(unsigned short* dimage) {
int col_idx = threadIdx.x;
int start_pt = blockIdx.y * 8 * 512 + blockIdx.x * 64 + col_idx;
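	// Hard-coded indexing: assumes 512-pixel-wide depth rows. The launch uses an
	// 8x8 grid of 64-thread blocks; each thread walks 53 rows of one column,
	// accumulating the sum and valid-pixel count, then (second pass) the squared
	// deviation, giving per-tile mean/variance/valid-count in imStats.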
__shared__ float sumArray[64];
__shared__ float varArray[64];
__shared__ int validArray[64];
__shared__ float mean;
__shared__ int valid;
sumArray[col_idx] = 0.0;
varArray[col_idx] = 0.0;
validArray[col_idx] = 0;
for (int i = 0; i < 53; i++) {
unsigned short d = dimage[start_pt + 512 * i];
if (d > 0) {
sumArray[col_idx] += (float)dimage[start_pt + 512 * i];
validArray[col_idx]++;
}
}
__syncthreads();
if (col_idx == 0) {
float sum = 0;
valid = 0;
for (int i = 0; i < 64; i++) {
sum += sumArray[i];
valid += validArray[i];
}
if (valid == 0) mean = 0;
else mean = sum / (valid);
}
__syncthreads();
for (int i = 0; i < 53; i++) {
unsigned short d = dimage[start_pt + 512 * i];
if (d > 0) {
varArray[col_idx] += (d - mean)* (d - mean);
}
}
__syncthreads();
if (col_idx == 0) {
float var = 0;
for (int i = 0; i < 64; i++) var += varArray[i];
if (valid == 0) var = 0;
else var /= (valid);
imStats[blockIdx.x * 3 + blockIdx.y * 8 * 3 + 0] = mean;
imStats[blockIdx.x * 3 + blockIdx.y * 8 * 3 + 1] = var;
imStats[blockIdx.x * 3 + blockIdx.y * 8 * 3 + 2] = valid;
}
}
void MakeTransIdentity(float* T) {
for (int i = 0; i < 16; i++) { T[i] = 0; }
T[0] = T[5] = T[10] = T[15] = 1.0;
}
cudaError_t MyCudaUtils::DenseICP(cv::Mat& dimage_new, cv::Mat& dimage_old, float* H_camPams, int* H_imSizes, float* T_hint, float depthStats[128]) {
assert(dimage_old.type() == CV_16U && dimage_new.type() == CV_16U);
assert(dimage_old.size == dimage_new.size);
int num_pts = dimage_new.rows * dimage_new.cols;
bool drawDepthMaps = false;
if (drawDepthMaps) DrawDepthMaps(dimage_new, dimage_old);
// Since cv::Mat is row-major, this layout plays the same role as a column-major,
// points-in-columns Eigen matrix
cv::Mat normals_new(num_pts, 3, CV_32F);
cv::Mat cloud_new(num_pts, 3, CV_32F);
cv::Mat cloud_old(num_pts, 3, CV_32F);
cv::Mat mapped_cloud_old(num_pts, 3, CV_32F);
cv::Mat correp_mask(dimage_new.rows, dimage_new.cols, CV_32S);
// Device memory pointers
float *D_cloud_new, *D_cloud_old;
unsigned short *D_dimage_new, *D_dimage_old;
float *D_normals_new, *D_normals_old;
float *D_trans, *D_rot;
float *D_mapped_cloud_old, *D_mapped_normals_old;
int *D_correp_mask;
float *D_A_mat, *D_B_mat;
// Output transform
float T_curr[16] = { 0.0 };
bool drawTransCloud = false;
bool drawTransNormals = false;
bool drawInitProjection = false;
bool drawInitNormals = false;
bool drawCorrep = false;
try {
// Allocate for the clouds
CUDA_MALLOC(D_cloud_new, num_pts * 3 * sizeof(float));
CUDA_MALLOC(D_cloud_old, num_pts * 3 * sizeof(float));
// For the correspondence mask
CUDA_MALLOC(D_correp_mask, num_pts * sizeof(int));
// Copy across the depth images
CUDA_INIT_MEM(D_dimage_new, dimage_new.data, num_pts * sizeof(unsigned short));
CUDA_INIT_MEM(D_dimage_old, dimage_old.data, num_pts * sizeof(unsigned short));
// Allocate for the normal map
CUDA_MALLOC(D_normals_new, num_pts * 3 * sizeof(float));
CUDA_MALLOC(D_normals_old, num_pts * 3 * sizeof(float));
// For old cloud and normals transformed into estimated new camera space
CUDA_MALLOC(D_mapped_cloud_old, num_pts * 3 * sizeof(float));
CUDA_MALLOC(D_mapped_normals_old, num_pts * 3 * sizeof(float));
// Camera parameters for projection
CUDA_CHECK(cudaMemcpyToSymbol(camPams, H_camPams, CAMPAMSZ * sizeof(float)));
CUDA_CHECK(cudaMemcpyToSymbol(imSizes, H_imSizes, IMPAMSZ * sizeof(int)));
// A and B matrices summed across each column by a single GPU thread
CUDA_MALLOC(D_A_mat, dimage_new.cols * 36 * sizeof(float));
CUDA_MALLOC(D_B_mat, dimage_new.cols * 6 * sizeof(float));
// Perform the projection of both clouds
int blocks = cvCeil(num_pts / 128.0f);
ProjectKernel << < blocks, 128 >> >(D_dimage_new, D_dimage_old, D_cloud_new, D_cloud_old, num_pts);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
// For debugging - check the projection of the dense clouds
if (drawInitProjection) {
CUDA_DOWNLOAD(cloud_new.data, D_cloud_new, num_pts*sizeof(float)* 3);
CUDA_DOWNLOAD(cloud_old.data, D_cloud_old, num_pts*sizeof(float)* 3);
DrawClouds((float *)cloud_new.data, num_pts, (float*)cloud_old.data, 0.8, 3.0);
}
// Compute the normals
ComputeNormalsKernel << < blocks, 128 >> >(D_cloud_new, D_normals_new, num_pts);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
ComputeNormalsKernel << < blocks, 128 >> >(D_cloud_old, D_normals_old, num_pts);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
// For debugging, check the computation of the normals
if (drawInitNormals) {
CUDA_DOWNLOAD(normals_new.data, D_normals_new, num_pts * 3 * sizeof(float));
DrawNormals((float*) cloud_new.data, num_pts, (float*) normals_new.data, 0.08, 0.26);
}
// Compute statistics for the depth image. For an equally sized 8x8 grid
// compute the mean and std_dev of the depth pixels
ComputeDepthImStats << <dim3(8, 8, 1), 64 >> >(D_dimage_new);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaMemcpyFromSymbol(depthStats, imStats, 192 * sizeof(float)));
// Perform the initial transformation of old points and normals to new frame
//T_hint[12] = -0.02;
//T_hint[13] = -0.02;
//T_hint[14] = 0.02;
CUDA_INIT_MEM(D_trans, T_hint, 16 * sizeof(float));
float R[16];
RfromT(T_hint, R);
CUDA_INIT_MEM(D_rot, R, 16 * sizeof(float));
float deltaX = 100;
float stateVec[6] = { 10.0 };
for (int i = 0; i< 16; i++) T_curr[i] = T_hint[i];
int iter = 0;
char file[256];
// Begin the loop of iterating to completion
int maxIter = 30;
bool failed_hint = false;
while (deltaX > 1e-8 && iter++ < maxIter) {
// Transform old points and normals by current estimate
TransformKernel<<< blocks, 128 >>>(D_cloud_old, D_trans, D_mapped_cloud_old, num_pts);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
TransformKernel <<< blocks, 128 >>>(D_normals_old, D_rot, D_mapped_normals_old, num_pts);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
// For debugging, draw transformed old cloud and the two on top of each other
if (drawTransCloud) {
CUDA_DOWNLOAD(mapped_cloud_old.data, D_mapped_cloud_old, num_pts*sizeof(float)* 3);
CUDA_DOWNLOAD(cloud_new.data, D_cloud_new, num_pts *sizeof(float)* 3);
sprintf_s(file, "Iter%d.png", iter-1);
DrawClouds((float*)cloud_new.data, num_pts, (float*)mapped_cloud_old.data, 0.8, 3.0, file);
// For saving screenshots
//sprintf_s(file, "Iter%d.png", iter++);
}
// For debugging, draws the transformed normals
if (drawTransNormals) {
CUDA_DOWNLOAD(normals_new.data, D_mapped_normals_old, num_pts * 3 * sizeof(float));
DrawNormals((float*)cloud_new.data, num_pts, (float*)normals_new.data, 0.08, 0.26);
}
// Get the correspondence map through projective data association
int host_num_correp[4] = { 0 };
CUDA_CHECK(cudaMemcpyToSymbol(num_correp, &host_num_correp, 4*sizeof(int)));
ProjectiveAssociationKernel << <blocks, 128 >> >(D_mapped_cloud_old, D_mapped_normals_old, D_cloud_new, D_normals_new, D_correp_mask, num_pts);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaMemcpyFromSymbol( &host_num_correp, num_correp, 4*sizeof(int)));
if (drawCorrep ) {
CUDA_DOWNLOAD(mapped_cloud_old.data, D_mapped_cloud_old, num_pts*sizeof(float)* 3);
CUDA_DOWNLOAD(cloud_new.data, D_cloud_new, num_pts *sizeof(float)* 3);
CUDA_DOWNLOAD(cloud_old.data, D_cloud_old, num_pts*sizeof(float)* 3);
CUDA_DOWNLOAD(correp_mask.data, D_correp_mask, num_pts * sizeof(int));
DrawCorrep((float*)mapped_cloud_old.data, (float*)cloud_new.data, num_pts, (int*)correp_mask.data);
}
// Hint might be bad, try restarting with identity
if (host_num_correp[0] < 100 && failed_hint == false) {
failed_hint = true;
MakeTransIdentity(T_curr);
CUDA_UPLOAD(D_trans, T_curr, 16 * sizeof(float));
float R[16];
RfromT(T_curr, R); // rotation of the freshly reset (identity) estimate, to match D_trans
CUDA_UPLOAD(D_rot, R, 16 * sizeof(float));
continue;
}
// Hint and identity failed, ICP has failed and return identity
else if (host_num_correp[0] < 100 && failed_hint == true) {
MakeTransIdentity(T_curr);
break;
}
//QueryPoint( correp_mask);
// Compute the Ax - b = 0 error function
ComputeErrorFunc << <16, 32 >> >(D_mapped_cloud_old, D_cloud_new, D_normals_new, D_correp_mask, D_A_mat, D_B_mat);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
float* H_A = new float[dimage_new.cols * 36];
float* H_B = new float[dimage_new.cols * 6];
CUDA_DOWNLOAD(H_A, D_A_mat, dimage_new.cols * 36 * sizeof(float));
CUDA_DOWNLOAD(H_B, D_B_mat, dimage_new.cols * 6 * sizeof(float));
// Solve
float newStateVec[6];
FindMinimisingTransform(H_A, H_B, dimage_new.cols, newStateVec);
delete[] H_A;
delete[] H_B;
// Compute amount of change in x - break if small
deltaX = 0.0;
for (int i = 0; i < 6; i++)
deltaX += (newStateVec[i])*(newStateVec[i]);
for (int i = 0; i < 6; i++) stateVec[i] = newStateVec[i];
// Compute equivalent transform and compound with existing estimate
float alp = stateVec[0], bet = stateVec[1], gam = stateVec[2];
float tx = stateVec[3], ty = stateVec[4], tz = stateVec[5];
float T[16] = { 0.0 };
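// Build the column-major homogeneous transform for this step: rotation
// R = Rz(gam) * Ry(bet) * Rx(alp) from the solved Euler angles, with the
// translation (tx, ty, tz) in the last column.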
T[0] = cos(gam)*cos(bet);
T[1] = sin(gam)*cos(bet);
T[2] = -sin(bet);
T[4] = -sin(gam)*cos(alp) + cos(gam)*sin(bet)*sin(alp);
T[5] = cos(gam)*cos(alp) + sin(gam)*sin(bet)*sin(alp);
T[6] = cos(bet)*sin(alp);
T[8] = sin(gam)*sin(alp) + cos(gam)*sin(bet)*cos(alp);
T[9] = -cos(gam)*sin(alp) + sin(gam)*sin(bet)*cos(alp);
T[10] = cos(bet)*cos(alp);
T[12] = tx; T[13] = ty; T[14] = tz;
T[15] = 1.0;
float T_temp[16];
MatMult(T_curr, T, T_temp, 4, 4, 4);
for (int i = 0; i < 16; i++) T_curr[i] = T_temp[i];
CUDA_UPLOAD(D_trans, T_curr, 16 * sizeof(float));
float R[16];
RfromT(T_curr, R); // keep D_rot in sync with the updated estimate rather than the original hint
CUDA_UPLOAD(D_rot, R, 16 * sizeof(float));
}
// Did not converge, return Identity
if (iter > maxIter) { // the post-increment in the loop condition leaves iter at maxIter+1 on non-convergence
for (int i = 0; i < 16; i++) { T_curr[i] = 0; }
T_curr[0] = T_curr[5] = T_curr[10] = T_curr[15] = 1.0;
}
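// The success path also throws, so the single catch block below frees the device
// buffers, copies the result into T_hint and returns the status (cudaSuccess here).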
throw(cudaSuccess);
}
catch (cudaError_t cudaStatus) {
cudaFree(D_cloud_new);
cudaFree(D_cloud_old);
cudaFree(D_dimage_new);
cudaFree(D_dimage_old);
cudaFree(D_normals_new);
cudaFree(D_normals_old);
cudaFree(D_mapped_cloud_old);
cudaFree(D_mapped_normals_old);
cudaFree(D_correp_mask);
cudaFree(D_trans);
cudaFree(D_rot);
cudaFree(D_A_mat);
cudaFree(D_B_mat);
for (int i = 0; i < 16; i++) T_hint[i] = T_curr[i];
return cudaStatus;
}
// Create normal map for the new depth image
// Associate points by
}
|
64be54bc3524b3fcaae57054573d29600309159d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__device__ volatile unsigned int count = 0;
/* The kernel "msort" should sort the input array using parallel merge-sort. */
__global__ void msort(int *d_input, int* d_temp, int N)
{
const int id = blockIdx.x * blockDim.x + threadIdx.x;
const int c = (int)ceilf(log2f(N));
int top = 0;
int upper_bound = (int)ceilf((float)N/2);
for (int i=0; i<c; ++i) {
if (id < upper_bound) {
int j = id*(1 << (i+1));
int diff = (1 << i);
int j_max = j+diff;
int k = j_max;
int k_max = k+diff;
int t_cnt = j;
if (j_max < N) {
if (k_max > N) {
k_max = N;
}
while (j < j_max && k < k_max) {
if (d_input[j] < d_input[k]) {
d_temp[t_cnt++] = d_input[j++];
} else {
d_temp[t_cnt++] = d_input[k++];
}
}
while (j < j_max) {
d_temp[t_cnt++] = d_input[j++];
}
while (k < k_max) {
d_temp[t_cnt++] = d_input[k++];
}
for (int cnt = id*(1 << (i+1)); cnt < k_max; ++cnt) {
d_input[cnt] = d_temp[cnt];
}
}
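// Software inter-block barrier: every thread active at this merge level bumps a global
// counter and spins until all of them have finished the level. This only works if all
// blocks are resident on the GPU at the same time; otherwise the spin-wait can deadlock.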
__threadfence();
atomicInc((unsigned int*)(&count), 1 << 30);
top += upper_bound;
while (count < top);
upper_bound = (int)ceilf((float)upper_bound/2);
}
}
if (id == 0) {
count = 0;
}
}
|
64be54bc3524b3fcaae57054573d29600309159d.cu
|
#include <stdio.h>
__device__ volatile unsigned int count = 0;
/* The kernel "msort" should sort the input array using parallel merge-sort. */
__global__ void msort(int *d_input, int* d_temp, int N)
{
const int id = blockIdx.x * blockDim.x + threadIdx.x;
const int c = (int)ceilf(log2f(N));
int top = 0;
int upper_bound = (int)ceilf((float)N/2);
for (int i=0; i<c; ++i) {
if (id < upper_bound) {
int j = id*(1 << (i+1));
int diff = (1 << i);
int j_max = j+diff;
int k = j_max;
int k_max = k+diff;
int t_cnt = j;
if (j_max < N) {
if (k_max > N) {
k_max = N;
}
while (j < j_max && k < k_max) {
if (d_input[j] < d_input[k]) {
d_temp[t_cnt++] = d_input[j++];
} else {
d_temp[t_cnt++] = d_input[k++];
}
}
while (j < j_max) {
d_temp[t_cnt++] = d_input[j++];
}
while (k < k_max) {
d_temp[t_cnt++] = d_input[k++];
}
for (int cnt = id*(1 << (i+1)); cnt < k_max; ++cnt) {
d_input[cnt] = d_temp[cnt];
}
}
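// Software inter-block barrier: every thread active at this merge level bumps a global
// counter and spins until all of them have finished the level. This only works if all
// blocks are resident on the GPU at the same time; otherwise the spin-wait can deadlock.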
__threadfence();
atomicInc((unsigned int*)(&count), 1 << 30);
top += upper_bound;
while (count < top);
upper_bound = (int)ceilf((float)upper_bound/2);
}
}
if (id == 0) {
count = 0;
}
}
|
d32b178d79255c20f5f0e6d12a7ed52fbd03fe30.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Author:
* Yixin Li, Email: [email protected]
* convert the image from RGB to LAB
*/
__global__ void rgb_to_lab( double * img, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t>=nPts) return;
double sR = img[3*t];
double sG = img[3*t+1];
double sB = img[3*t+2];
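// x != x is true only for NaN, so pixels with undefined colour are left untouched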
if (sR!=sR || sG!=sG || sB!=sB) return;
//RGB (D65 illuminant assumption) to XYZ conversion
double R = sR/255.0;
double G = sG/255.0;
double B = sB/255.0;
double r, g, b;
if(R <= 0.04045) r = R/12.92;
else r = pow((R+0.055)/1.055,2.4);
if(G <= 0.04045) g = G/12.92;
else g = pow((G+0.055)/1.055,2.4);
if(B <= 0.04045) b = B/12.92;
else b = pow((B+0.055)/1.055,2.4);
double X = r*0.4124564 + g*0.3575761 + b*0.1804375;
double Y = r*0.2126729 + g*0.7151522 + b*0.0721750;
double Z = r*0.0193339 + g*0.1191920 + b*0.9503041;
//convert from XYZ to LAB
double epsilon = 0.008856; //actual CIE standard
double kappa = 903.3; //actual CIE standard
double Xr = 0.950456; //reference white
double Yr = 1.0; //reference white
double Zr = 1.088754; //reference white
double xr = X/Xr;
double yr = Y/Yr;
double zr = Z/Zr;
double fx, fy, fz;
if(xr > epsilon) fx = pow(xr, 1.0/3.0);
else fx = (kappa*xr + 16.0)/116.0;
if(yr > epsilon) fy = pow(yr, 1.0/3.0);
else fy = (kappa*yr + 16.0)/116.0;
if(zr > epsilon) fz = pow(zr, 1.0/3.0);
else fz = (kappa*zr + 16.0)/116.0;
double lval = 116.0*fy-16.0;
double aval = 500.0*(fx-fy);
double bval = 200.0*(fy-fz);
img[3*t] = lval;
img[3*t+1] = aval;
img[3*t+2] = bval;
}
|
d32b178d79255c20f5f0e6d12a7ed52fbd03fe30.cu
|
/*
* Author:
* Yixin Li, Email: [email protected]
* convert the image from RGB to LAB
*/
__global__ void rgb_to_lab( double * img, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t>=nPts) return;
double sR = img[3*t];
double sG = img[3*t+1];
double sB = img[3*t+2];
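// x != x is true only for NaN, so pixels with undefined colour are left untouched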
if (sR!=sR || sG!=sG || sB!=sB) return;
//RGB (D65 illuminant assumption) to XYZ conversion
double R = sR/255.0;
double G = sG/255.0;
double B = sB/255.0;
double r, g, b;
if(R <= 0.04045) r = R/12.92;
else r = pow((R+0.055)/1.055,2.4);
if(G <= 0.04045) g = G/12.92;
else g = pow((G+0.055)/1.055,2.4);
if(B <= 0.04045) b = B/12.92;
else b = pow((B+0.055)/1.055,2.4);
double X = r*0.4124564 + g*0.3575761 + b*0.1804375;
double Y = r*0.2126729 + g*0.7151522 + b*0.0721750;
double Z = r*0.0193339 + g*0.1191920 + b*0.9503041;
//convert from XYZ to LAB
double epsilon = 0.008856; //actual CIE standard
double kappa = 903.3; //actual CIE standard
double Xr = 0.950456; //reference white
double Yr = 1.0; //reference white
double Zr = 1.088754; //reference white
double xr = X/Xr;
double yr = Y/Yr;
double zr = Z/Zr;
double fx, fy, fz;
if(xr > epsilon) fx = pow(xr, 1.0/3.0);
else fx = (kappa*xr + 16.0)/116.0;
if(yr > epsilon) fy = pow(yr, 1.0/3.0);
else fy = (kappa*yr + 16.0)/116.0;
if(zr > epsilon) fz = pow(zr, 1.0/3.0);
else fz = (kappa*zr + 16.0)/116.0;
double lval = 116.0*fy-16.0;
double aval = 500.0*(fx-fy);
double bval = 200.0*(fy-fz);
img[3*t] = lval;
img[3*t+1] = aval;
img[3*t+2] = bval;
}
|
7b384e377dbd15d64f415c7adee18ae6c5887465.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime_api.h>
#include <hiprand/hiprand.h>
#include "hiprand/hiprand_kernel.h"
#include <assert.h>
// L should be (multiple of (THR_NUMBER - 2) ) + 2
const int THR_NUMBER = 30;
#define SETBLOCKNUM 5
// #define L 122
const int L = (THR_NUMBER -2)* SETBLOCKNUM +2;
// #define MULTISPIN unsigned char
#define MULTISPIN unsigned int
const int MULTISIZE = sizeof(MULTISPIN) *8;
#define T_CYCLE_START 2.26
#define T_CYCLE_END 2.275
#define T_CYCLE_STEP 0.002
#define SINGLETEMP 2.26918531
int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP);
#define J 1.
#define SEED 1000
const int AREA = L*L;
const int NTOT = (L-2)*(L-2);
// static const float EXP4_TRESHOLD = exp( -(4.*J) / T);
// static const float EXP8_TRESHOLD = exp( -(8.*J) / T);
#define STEPS_REPEAT 3
#define T_MAX_SIM 100
#define T_MEASURE_WAIT 20
#define T_MEASURE_INTERVAL 10
// print history true/false
#define HISTORY 1
const int BLOCK_NUMBER = ( L-2)/( THR_NUMBER - 2 );
const dim3 BLOCKS( BLOCK_NUMBER, BLOCK_NUMBER );
const dim3 THREADS( THR_NUMBER, THR_NUMBER );
// average tracker struct
struct avg_tr {
float sum;
float sum_squares;
int n;
};
struct avg_tr new_avg_tr(int locn) {
struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn};
return a;
}
void update_avg(struct avg_tr * tr_p, float newval) {
tr_p->sum += newval;
tr_p->sum_squares += (newval*newval);
}
float average( struct avg_tr tr) {
return (tr.sum)/((float) tr.n) ;
}
float stdev( struct avg_tr tr) {
return sqrt( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
}
// float variance( struct avg_tr tr) {
// return ( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
// }
// RNG init kernel
__global__ void initRNG(hiprandState_t * const rngStates, const int seed) {
// Determine thread ID
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
// Initialise the RNG
hiprand_init(seed, tid, 0, &rngStates[tid]);
}
struct coords {
int x;
int y;
};
__device__ coords dev_get_thread_coords() {
struct coords thread_coords;
thread_coords.x = blockIdx.x*( THR_NUMBER - 2 ) + ( threadIdx.x ) ;
thread_coords.y = blockIdx.y*( THR_NUMBER - 2 ) + ( threadIdx.y ) ;
return thread_coords;
}
// float unitrand(){
// return (float)rand() / (float)RAND_MAX;
// }
__device__ float dev_unitrand( hiprandState_t * const rngStates, unsigned int tid ){
hiprandState_t localState = rngStates[tid];
float val = hiprand_uniform(&localState);
rngStates[tid] = localState;
return val;
}
// index has to be less that MULTISIZE
__device__ void dev_set_spin_1 (MULTISPIN * multi, int index) {
*multi |= 1 << index;
}
__device__ void dev_set_spin_0 (MULTISPIN * multi, int index) {
*multi &= ~(1 << index);
}
__device__ MULTISPIN dev_read_spin(MULTISPIN multi, int index) {
return ( (multi >> index) & 1 );
}
// each bit of exp4 and exp8 describes the Metropolis RNG result for that bit,
// specifying if the random r is bigger or smaller than the relevant thresholds e^(-4J/kT) and e^(-8J/kT) (passed from outside)
__device__ MULTISPIN generate_exp4_mask(float exp4, float exp8, hiprandState_t * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( exp4 > random_number && random_number > exp8) { // this is taken from the article and works. the version below might not but slightly simplifies some things
// if( exp4 > random_number) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
__device__ MULTISPIN generate_exp8_mask(float exp8, hiprandState_t * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( random_number < exp8 ) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
MULTISPIN init_random_multispin() {
return (MULTISPIN) rand(); // just spam random bits
}
void init_random_grid(MULTISPIN grid[L*L]) {
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
grid[x+y*L] = init_random_multispin();
}
}
}
MULTISPIN init_t0_multispin() {
return (MULTISPIN) 0; // should be all zeros for all sensible multispin types
}
void init_t0_grid(MULTISPIN grid[L*L]) {
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
grid[x+y*L] = init_t0_multispin();
}
}
}
// can segfault
__device__ static inline MULTISPIN dev_shared_grid_step(MULTISPIN shared_grid[THR_NUMBER*THR_NUMBER], int x, int y, int xstep, int ystep) {
return shared_grid[(x+xstep) + (y+ystep)*THR_NUMBER];
}
// segfault if applied to an edge spin, must be called only on the inner L-1 grid
__device__ void dev_update_multispin_shared(MULTISPIN grid[THR_NUMBER*THR_NUMBER], int x, int y, float exp4, float exp8, hiprandState_t * const rngStates, int tid ) {
MULTISPIN s0 = grid[x+y*THR_NUMBER];
MULTISPIN exp4_mask = generate_exp4_mask(exp4, exp8, rngStates, tid ); // here
MULTISPIN exp8_mask = generate_exp8_mask(exp8, rngStates, tid );
// "energy variables" indicating whether s0 is equal or opposite to each of its 4 neighbours
MULTISPIN i1 = s0 ^ dev_shared_grid_step(grid, x, y, 1, 0);
MULTISPIN i2 = s0 ^ dev_shared_grid_step(grid, x, y, -1, 0);
MULTISPIN i3 = s0 ^ dev_shared_grid_step(grid, x, y, 0, 1);
MULTISPIN i4 = s0 ^ dev_shared_grid_step(grid, x, y, 0, -1);
// bit sums with carry over between the i variables
MULTISPIN j1 = i1 & i2;
MULTISPIN j2 = i1 ^ i2;
MULTISPIN j3 = i3 & i4;
MULTISPIN j4 = i3 ^ i4;
// logic for deciding whether to flip s0 or not
MULTISPIN flip_mask = ( ((j1 | j3) | (~(j1^j3) & (j2&j4)) ) | ((j2 | j4) & exp4_mask ) | exp8_mask );
grid[x+y*THR_NUMBER] = grid[x+y*THR_NUMBER] ^ flip_mask;
// explanation:
// spins | i1234 | deltaE | j1 j2 j3 j4 |
// 1 | 1 | | |
// 101 | 1 1 | -8 | 1 0 1 0 |
// 1 | 1 | | |
//
// 0 | 0 | | |
// 101 | 1 1 | -4 | 0 1 1 0 | (j1 | j3)
// 1 | 1 | | |
//
// 0 | 0 | | 0 0 1 0 |
// 001 | 0 1 | 0 | or |-------------------------
// 1 | 1 | | 0 1 0 1 | ~(j1^j3) & (j2&j4))
//------------------------------------------------------------------
//
// 0 | 0 | | |
// 000 | 0 0 | +4 | | (j2 | j4) & exp4
// 1 | 1 | | |
//------------------------------------------------------------------
//
// 0 | 0 | | |
// 000 | 0 0 | +8 | 0 0 0 0 | exp8
// 0 | 0 | | |
// the first 2 cases are detected by (j1 | j3) and lead to the spin flip regardless of the RNG roll.
// the deltaE = 0 case can result in two different forms for the j's depending on how the spins are paired.
// the first of these is correctly picked up by (j1 | j3), while the second needs its own expression ~(j1^j3) & (j2&j4))
// in the 4th case, detected by (j2 | j4), the spin is flipped only if the RNG roll is lucky enough (exp4 = 1)
// if we still haven't flipped, we get to the last case. here the spin is flipped only if the RNG roll gives the luckiest result (exp8 = 1).
}
// non GPU function
void multidump_first(MULTISPIN grid[L*L]) {
// printf("first bit grid (out of %i):\n", MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & 1 ) == 0) printf(" ");
else printf("");
}
printf("\n");
}
printf("\n");
}
// non GPU function
void multidump_a_few(MULTISPIN grid[L*L]) {
for(int k=0; k<5; k++) {
printf("grid on bit %i (out of %i):\n", k, MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & (1 << k) ) == 0) printf(" ");
else printf("");
}
printf("\n");
}
printf("\n");
}
}
__global__ void dev_measure_cycle_kernel(MULTISPIN * dev_grid, hiprandState_t * const rngStates, float * dev_single_run_avgs, int * dev_partial_res, float exp4, float exp8, int ksim ) {
// setup
struct coords glob_coords = dev_get_thread_coords();
int glob_x = glob_coords.x;
int glob_y = glob_coords.y;
// Determine thread ID (for RNG)
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
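// Each block caches its THR_NUMBER x THR_NUMBER tile of the lattice in shared memory.
// Because glob_x/glob_y advance by THR_NUMBER-2 per block, adjacent tiles overlap, so
// every block holds a one-spin halo around the (THR_NUMBER-2)^2 interior it updates.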
__shared__ MULTISPIN shared_grid[ THR_NUMBER*THR_NUMBER ];
shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] = dev_grid[(glob_x )+ (glob_y )*L ];
__syncthreads();
// allocate shared memory for measure results
// magnetization
__shared__ int blocksum[ MULTISIZE ];
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
for (int multik=0; multik<MULTISIZE; multik++) {
blocksum[ multik ] = 0;
}
}
__syncthreads();
////////////////////////////////////////////
////// measure
////////////////////////////////////////////
if(ksim > T_MEASURE_WAIT && ksim % T_MEASURE_INTERVAL == 0) {
// this condition does not depend on the thread id in any way
for (int multik=0; multik<MULTISIZE; multik++) {
// magnetization
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
int lspin = (int) dev_read_spin(shared_grid[threadIdx.x + threadIdx.y*THR_NUMBER], multik );
atomicAdd( &(blocksum[ multik ]), lspin ); // change with pointer arithm
}
__syncthreads();
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
int blockntot = (THR_NUMBER-2)*(THR_NUMBER-2);
float nval = ((float) ( blocksum[ multik] *2 - blockntot ))/ ( (float) blockntot );
atomicAdd(&(dev_single_run_avgs[multik]), nval);
blocksum[ multik ] = 0;
}
}
}
__syncthreads();
////////////////////////////////////////////
////// update
////////////////////////////////////////////
// macro-checkboards
// macro-white
if( (blockIdx.x + blockIdx.y%2)%2 == 0 ) {
/////////////
// checkboards
// update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
// if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
// threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
// dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
// }
//////////
}
__syncthreads();
// macro-black
if( (blockIdx.x + blockIdx.y%2)%2 == 1 ) {
//////////
// checkboards
// update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
}
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
}
//////////
// __syncthreads();
}
void parall_measure_cycle(MULTISPIN startgrid[L*L], MULTISPIN * dev_grid, float exp4, float exp8, hiprandState_t * const rngStates, FILE *resf) {
float n_measures_per_sim = (float) ((T_MAX_SIM - T_MEASURE_WAIT)/T_MEASURE_INTERVAL);
// space for tracking magnetization
float single_run_avgs[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {single_run_avgs[k] = 0.;}
float * dev_single_run_avgs;
hipMalloc(&dev_single_run_avgs, MULTISIZE*sizeof(float));
hipMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), hipMemcpyHostToDevice);
// extra space needed by update_magnetization
int partial_res[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {partial_res[k] = 0;}
int * dev_partial_res;
hipMalloc(&dev_partial_res, MULTISIZE*sizeof(int));
hipMemcpy(dev_partial_res, &partial_res, MULTISIZE*sizeof(int), hipMemcpyHostToDevice);
// outer average
struct avg_tr avg_of_runs = new_avg_tr( MULTISIZE * STEPS_REPEAT );
for( int krep=0; krep< STEPS_REPEAT; krep++) {
if (HISTORY) printf("# simulation %i\n", krep+1);
if (HISTORY) printf("# waiting thermalization for the first %i sim steps.\n", T_MEASURE_WAIT);
hipMemcpy(dev_grid, startgrid, L*L*sizeof(MULTISPIN), hipMemcpyHostToDevice);
// kernel
for (int ksim=0; ksim<T_MAX_SIM; ksim++) {
hipLaunchKernelGGL(( dev_measure_cycle_kernel), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_grid, rngStates, dev_single_run_avgs, dev_partial_res, exp4, exp8, ksim );
}
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("kernel: ERROR: %s\n", hipGetErrorString(err));
} else printf("kernel: no ERROR: %s\n", hipGetErrorString(err));
// results
// magnetization
hipMemcpy(&single_run_avgs, dev_single_run_avgs, MULTISIZE*sizeof(float), hipMemcpyDeviceToHost);
for(int multik=0; multik <MULTISIZE; multik++) {
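// dev_single_run_avgs[multik] has accumulated one per-block average magnetisation per
// measurement, so dividing by (measurements * number of blocks) gives the average
// magnetisation per spin for this bit-plane.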
float lres = single_run_avgs[multik] / (n_measures_per_sim * BLOCK_NUMBER*BLOCK_NUMBER); // change
if (HISTORY) printf("# average on bit %i\n: %f\n", multik+1, lres);
update_avg(&avg_of_runs, lres);
// reset averages
single_run_avgs[multik] = 0.;
partial_res[multik] = 0;
}
//reset on device
hipMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_partial_res, & partial_res, MULTISIZE*sizeof(int), hipMemcpyHostToDevice);
if (HISTORY) printf("# end simulation %i\n", krep+1);
}
// END OUTER REPETITION LOOP
// magn
float l2av = average(avg_of_runs);
float l2stdev = stdev(avg_of_runs);
if (HISTORY) printf("# overall average \n: %f +- %f\n", l2av, l2stdev);
fprintf(resf, "%f ", l2av);
fprintf(resf, "%f\n", l2stdev);
// grid for displaying end-state (of last rep only)
MULTISPIN endgrid[L*L];
hipMemcpy(endgrid, dev_grid, L*L*sizeof(MULTISPIN), hipMemcpyDeviceToHost);
if (HISTORY) multidump_first(endgrid);
hipFree(dev_partial_res);
hipFree(dev_single_run_avgs);
}
int main() {
// L should be (multiple of THR_NUMBER -2) + 2
assert( ((L-2)% (THR_NUMBER-2) )== 0 );
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
FILE *resf = fopen("results.txt", "w");
fprintf(resf, "# gpu1\n");
fprintf(resf, "# parameters:\n# linear_size: %i\n", L);
fprintf(resf, "# coupling: %f\n# repetitions: %i\n", J, STEPS_REPEAT);
fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", T_MAX_SIM,T_MEASURE_WAIT, T_MEASURE_INTERVAL, SEED);
fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
fprintf(resf, "\n");
fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n");
// still used for init_random_grid
srand(SEED);
// hiprand init
// Allocate memory for RNG states
hiprandState_t *d_rngStates = 0;
hipMalloc((void **)&d_rngStates, THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(hiprandState_t));
// Initialise RNG
hipLaunchKernelGGL(( initRNG), dim3(BLOCKS), dim3(THREADS), 0, 0, d_rngStates, SEED);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("initRNG: ERROR: %s\n", hipGetErrorString(err));
} else printf("initRNG: no ERROR: %s\n", hipGetErrorString(err));
// device grid
MULTISPIN * dev_grid;
hipMalloc(&dev_grid, L*L*sizeof(MULTISPIN));
// original grid on the cpu
MULTISPIN startgrid[L*L];
init_t0_grid(startgrid);
// multidump_a_few(startgrid);
// // temp cycle:
// for( float kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) {
// const float EXP4 = exp( -(4.*J) / kt);
// const float EXP8 = exp( -(8.*J) / kt);
// fprintf(resf, "%f ", kt);
// if (HISTORY) printf("temperature: %f\n", kt);
// parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf);
// }
// // // // only 1:
// // // // just one:
const float EXP4 = exp( -(4.*J) / SINGLETEMP);
const float EXP8 = exp( -(8.*J) / SINGLETEMP);
fprintf(resf, "%f ", SINGLETEMP);
if (HISTORY) printf("temperature: %f\n", SINGLETEMP);
parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf);
printf(" ERROR? rng malloc size: %i\n", THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(hiprandState_t));
printf(" ERROR? shared memory used: %i\n", THR_NUMBER*THR_NUMBER*sizeof(MULTISPIN) + BLOCK_NUMBER*BLOCK_NUMBER*MULTISIZE*sizeof(int));
hipFree(d_rngStates);
hipFree(dev_grid);
hipEventRecord(stop);
hipEventSynchronize(stop);
float total_time = 0;
hipEventElapsedTime(&total_time, start, stop);
FILE *timef = fopen("time.txt", "w");
long int total_flips = ((long int)(n_temps))* ((long int)((STEPS_REPEAT))) * ((long int)(T_MAX_SIM)) * ((long int)(NTOT));
fprintf(timef, "# gpu1\n");
fprintf(timef, "# total execution time (milliseconds):\n");
fprintf(timef, "%f\n", total_time);
fprintf(timef, "# total spin flips performed:\n");
fprintf(timef, "%li\n", total_flips);
fprintf(timef, "# average spin flips per millisecond:\n");
fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) );
fclose(timef);
fclose(resf);
return 0;
}
|
7b384e377dbd15d64f415c7adee18ae6c5887465.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime_api.h>
#include <curand.h>
#include "curand_kernel.h"
#include <assert.h>
// L should be (multiple of (THR_NUMBER - 2) ) + 2
const int THR_NUMBER = 30;
#define SETBLOCKNUM 5
// #define L 122
const int L = (THR_NUMBER -2)* SETBLOCKNUM +2;
// #define MULTISPIN unsigned char
#define MULTISPIN unsigned int
const int MULTISIZE = sizeof(MULTISPIN) *8;
#define T_CYCLE_START 2.26
#define T_CYCLE_END 2.275
#define T_CYCLE_STEP 0.002
#define SINGLETEMP 2.26918531
int n_temps = ( T_CYCLE_END - T_CYCLE_START )/ (T_CYCLE_STEP);
#define J 1.
#define SEED 1000
const int AREA = L*L;
const int NTOT = (L-2)*(L-2);
// static const float EXP4_TRESHOLD = exp( -(4.*J) / T);
// static const float EXP8_TRESHOLD = exp( -(8.*J) / T);
#define STEPS_REPEAT 3
#define T_MAX_SIM 100
#define T_MEASURE_WAIT 20
#define T_MEASURE_INTERVAL 10
// print history true/false
#define HISTORY 1
const int BLOCK_NUMBER = ( L-2)/( THR_NUMBER - 2 );
const dim3 BLOCKS( BLOCK_NUMBER, BLOCK_NUMBER );
const dim3 THREADS( THR_NUMBER, THR_NUMBER );
// average tracker struct
struct avg_tr {
float sum;
float sum_squares;
int n;
};
struct avg_tr new_avg_tr(int locn) {
struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn};
return a;
}
void update_avg(struct avg_tr * tr_p, float newval) {
tr_p->sum += newval;
tr_p->sum_squares += (newval*newval);
}
float average( struct avg_tr tr) {
return (tr.sum)/((float) tr.n) ;
}
float stdev( struct avg_tr tr) {
return sqrt( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
}
// float variance( struct avg_tr tr) {
// return ( ( tr.sum_squares)/((float) tr.n) - pow(( (tr.sum)/((float) tr.n) ),2) );
// }
// RNG init kernel
__global__ void initRNG(curandState * const rngStates, const int seed) {
// Determine thread ID
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
// Initialise the RNG
curand_init(seed, tid, 0, &rngStates[tid]);
}
struct coords {
int x;
int y;
};
__device__ coords dev_get_thread_coords() {
struct coords thread_coords;
thread_coords.x = blockIdx.x*( THR_NUMBER - 2 ) + ( threadIdx.x ) ;
thread_coords.y = blockIdx.y*( THR_NUMBER - 2 ) + ( threadIdx.y ) ;
return thread_coords;
}
// float unitrand(){
// return (float)rand() / (float)RAND_MAX;
// }
__device__ float dev_unitrand( curandState * const rngStates, unsigned int tid ){
curandState localState = rngStates[tid];
float val = curand_uniform(&localState);
rngStates[tid] = localState;
return val;
}
// index has to be less that MULTISIZE
__device__ void dev_set_spin_1 (MULTISPIN * multi, int index) {
*multi |= 1 << index;
}
__device__ void dev_set_spin_0 (MULTISPIN * multi, int index) {
*multi &= ~(1 << index);
}
__device__ MULTISPIN dev_read_spin(MULTISPIN multi, int index) {
return ( (multi >> index) & 1 );
}
// each bit of exp4 and exp8 describes the Metropolis RNG result for that bit,
// specifying if the random r is bigger or smaller than the relevant thresholds e^(-4J/kT) and e^(-8J/kT) (passed from outside)
__device__ MULTISPIN generate_exp4_mask(float exp4, float exp8, curandState * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( exp4 > random_number && random_number > exp8) { // this is taken from the article and works. the version below might not but slightly simplifies some things
// if( exp4 > random_number) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
__device__ MULTISPIN generate_exp8_mask(float exp8, curandState * const rngStates, int tid ) {
MULTISPIN res;
for(int k=0; k<MULTISIZE; k++) {
float random_number = dev_unitrand(rngStates, tid);
if( random_number < exp8 ) {
dev_set_spin_1(&res, k);
} else {
dev_set_spin_0(&res, k);
}
}
return res;
}
MULTISPIN init_random_multispin() {
return (MULTISPIN) rand(); // just spam random bits
}
void init_random_grid(MULTISPIN grid[L*L]) {
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
grid[x+y*L] = init_random_multispin();
}
}
}
MULTISPIN init_t0_multispin() {
return (MULTISPIN) 0; // should be all zeros for all sensible multispin types
}
void init_t0_grid(MULTISPIN grid[L*L]) {
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
grid[x+y*L] = init_t0_multispin();
}
}
}
// can segfault
__device__ static inline MULTISPIN dev_shared_grid_step(MULTISPIN shared_grid[THR_NUMBER*THR_NUMBER], int x, int y, int xstep, int ystep) {
return shared_grid[(x+xstep) + (y+ystep)*THR_NUMBER];
}
// segfault if applied to an edge spin, must be called only on the inner L-1 grid
__device__ void dev_update_multispin_shared(MULTISPIN grid[THR_NUMBER*THR_NUMBER], int x, int y, float exp4, float exp8, curandState * const rngStates, int tid ) {
MULTISPIN s0 = grid[x+y*THR_NUMBER];
MULTISPIN exp4_mask = generate_exp4_mask(exp4, exp8, rngStates, tid ); // here
MULTISPIN exp8_mask = generate_exp8_mask(exp8, rngStates, tid );
// "energy variables" indicating whether s0 is equal or opposite to each of its 4 neighbours
MULTISPIN i1 = s0 ^ dev_shared_grid_step(grid, x, y, 1, 0);
MULTISPIN i2 = s0 ^ dev_shared_grid_step(grid, x, y, -1, 0);
MULTISPIN i3 = s0 ^ dev_shared_grid_step(grid, x, y, 0, 1);
MULTISPIN i4 = s0 ^ dev_shared_grid_step(grid, x, y, 0, -1);
// bit sums with carry over between the i variables
MULTISPIN j1 = i1 & i2;
MULTISPIN j2 = i1 ^ i2;
MULTISPIN j3 = i3 & i4;
MULTISPIN j4 = i3 ^ i4;
// logic for deciding whether to flip s0 or not
MULTISPIN flip_mask = ( ((j1 | j3) | (~(j1^j3) & (j2&j4)) ) | ((j2 | j4) & exp4_mask ) | exp8_mask );
grid[x+y*THR_NUMBER] = grid[x+y*THR_NUMBER] ^ flip_mask;
// explanation:
// spins | i1234 | deltaE | j1 j2 j3 j4 |
// 1 | 1 | | |
// 101 | 1 1 | -8 | 1 0 1 0 |
// 1 | 1 | | |
//
// 0 | 0 | | |
// 101 | 1 1 | -4 | 0 1 1 0 | (j1 | j3)
// 1 | 1 | | |
//
// 0 | 0 | | 0 0 1 0 |
// 001 | 0 1 | 0 | or |-------------------------
// 1 | 1 | | 0 1 0 1 | ~(j1^j3) & (j2&j4))
//------------------------------------------------------------------
//
// 0 | 0 | | |
// 000 | 0 0 | +4 | | (j2 | j4) & exp4
// 1 | 1 | | |
//------------------------------------------------------------------
//
// 0 | 0 | | |
// 000 | 0 0 | +8 | 0 0 0 0 | exp8
// 0 | 0 | | |
// the first 2 cases are detected by (j1 | j3) and lead to the spin flip regardless of the RNG roll.
// the deltaE = 0 case can result in two different forms for the j's depending on how the spins are paired.
// the first of these is correctly picked up by (j1 | j3), while the second needs its own expression ~(j1^j3) & (j2&j4))
// in the 4th case, detected by (j2 | j4), the spin is flipped only if the RNG roll is lucky enough (exp4 = 1)
// if we still haven't flipped, we get to the last case. here the spin is flipped only if the RNG roll gives the luckiest result (exp8 = 1).
}
// non GPU function
void multidump_first(MULTISPIN grid[L*L]) {
// printf("first bit grid (out of %i):\n", MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & 1 ) == 0) printf(" ");
else printf("█");
}
printf("\n");
}
printf("\n");
}
// non GPU function
void multidump_a_few(MULTISPIN grid[L*L]) {
for(int k=0; k<5; k++) {
printf("grid on bit %i (out of %i):\n", k, MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & (1 << k) ) == 0) printf(" ");
else printf("█");
}
printf("\n");
}
printf("\n");
}
}
__global__ void dev_measure_cycle_kernel(MULTISPIN * dev_grid, curandState * const rngStates, float * dev_single_run_avgs, int * dev_partial_res, float exp4, float exp8, int ksim ) {
// setup
struct coords glob_coords = dev_get_thread_coords();
int glob_x = glob_coords.x;
int glob_y = glob_coords.y;
// Determine thread ID (for RNG)
int blockId = blockIdx.x+ blockIdx.y * gridDim.x;
int tid = blockId * (blockDim.x * blockDim.y)+ (threadIdx.y * blockDim.x)+ threadIdx.x;
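// Each block caches its THR_NUMBER x THR_NUMBER tile of the lattice in shared memory.
// Because glob_x/glob_y advance by THR_NUMBER-2 per block, adjacent tiles overlap, so
// every block holds a one-spin halo around the (THR_NUMBER-2)^2 interior it updates.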
__shared__ MULTISPIN shared_grid[ THR_NUMBER*THR_NUMBER ];
shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] = dev_grid[(glob_x )+ (glob_y )*L ];
__syncthreads();
// allocate shared memory for measure results
// magnetization
__shared__ int blocksum[ MULTISIZE ];
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
for (int multik=0; multik<MULTISIZE; multik++) {
blocksum[ multik ] = 0;
}
}
__syncthreads();
////////////////////////////////////////////
////// measure
////////////////////////////////////////////
if(ksim > T_MEASURE_WAIT && ksim % T_MEASURE_INTERVAL == 0) {
// this condition does not depend on the thread id in any way
for (int multik=0; multik<MULTISIZE; multik++) {
// magnetization
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 && threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
int lspin = (int) dev_read_spin(shared_grid[threadIdx.x + threadIdx.y*THR_NUMBER], multik );
atomicAdd( &(blocksum[ multik ]), lspin ); // change with pointer arithm
}
__syncthreads();
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
int blockntot = (THR_NUMBER-2)*(THR_NUMBER-2);
float nval = ((float) ( blocksum[ multik] *2 - blockntot ))/ ( (float) blockntot );
atomicAdd(&(dev_single_run_avgs[multik]), nval);
blocksum[ multik ] = 0;
}
}
}
__syncthreads();
////////////////////////////////////////////
////// update
////////////////////////////////////////////
// macro-checkboards
// macro-white
if( (blockIdx.x + blockIdx.y%2)%2 == 0 ) {
/////////////
// checkboards
// update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
// if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
// threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
// dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
// }
//////////
}
__syncthreads();
// macro-black
if( (blockIdx.x + blockIdx.y%2)%2 == 1 ) {
//////////
// checkboards
// update only in the inner 30x30 block of threads, because the edge threads aren't mapped to any grid spins
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// white
if( (glob_x + glob_y%2)%2 == 0 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
if ( threadIdx.x != 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y != 0 && threadIdx.y != THR_NUMBER-1 ) {
// black
if( (glob_x + glob_y%2)%2 == 1 ) {
dev_update_multispin_shared( shared_grid, threadIdx.x, threadIdx.y, exp4, exp8, rngStates, tid);
}
}
__syncthreads();
}
if ( threadIdx.x > 0 && threadIdx.x != THR_NUMBER-1 &&
threadIdx.y > 0 && threadIdx.y != THR_NUMBER-1 ) {
dev_grid[(glob_x )+ (glob_y )*L ] = shared_grid[ threadIdx.x + threadIdx.y*THR_NUMBER ] ;
}
//////////
// __syncthreads();
}
void parall_measure_cycle(MULTISPIN startgrid[L*L], MULTISPIN * dev_grid, float exp4, float exp8, curandState * const rngStates, FILE *resf) {
float n_measures_per_sim = (float) ((T_MAX_SIM - T_MEASURE_WAIT)/T_MEASURE_INTERVAL);
// space for tracking magnetization
float single_run_avgs[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {single_run_avgs[k] = 0.;}
float * dev_single_run_avgs;
cudaMalloc(&dev_single_run_avgs, MULTISIZE*sizeof(float));
cudaMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), cudaMemcpyHostToDevice);
// extra space needed by update_magnetization
int partial_res[MULTISIZE];
for (int k=0; k<MULTISIZE; k++) {partial_res[k] = 0;}
int * dev_partial_res;
cudaMalloc(&dev_partial_res, MULTISIZE*sizeof(int));
cudaMemcpy(dev_partial_res, &partial_res, MULTISIZE*sizeof(int), cudaMemcpyHostToDevice);
// outer average
struct avg_tr avg_of_runs = new_avg_tr( MULTISIZE * STEPS_REPEAT );
for( int krep=0; krep< STEPS_REPEAT; krep++) {
if (HISTORY) printf("# simulation %i\n", krep+1);
if (HISTORY) printf("# waiting thermalization for the first %i sim steps.\n", T_MEASURE_WAIT);
cudaMemcpy(dev_grid, startgrid, L*L*sizeof(MULTISPIN), cudaMemcpyHostToDevice);
// kernel
for (int ksim=0; ksim<T_MAX_SIM; ksim++) {
dev_measure_cycle_kernel<<<BLOCKS, THREADS>>>(dev_grid, rngStates, dev_single_run_avgs, dev_partial_res, exp4, exp8, ksim );
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("kernel: ERROR: %s\n", cudaGetErrorString(err));
} else printf("kernel: no ERROR: %s\n", cudaGetErrorString(err));
// results
// magnetization
cudaMemcpy(&single_run_avgs, dev_single_run_avgs, MULTISIZE*sizeof(float), cudaMemcpyDeviceToHost);
for(int multik=0; multik <MULTISIZE; multik++) {
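// dev_single_run_avgs[multik] has accumulated one per-block average magnetisation per
// measurement, so dividing by (measurements * number of blocks) gives the average
// magnetisation per spin for this bit-plane.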
float lres = single_run_avgs[multik] / (n_measures_per_sim * BLOCK_NUMBER*BLOCK_NUMBER); // change
if (HISTORY) printf("# average on bit %i\n: %f\n", multik+1, lres);
update_avg(&avg_of_runs, lres);
// reset averages
single_run_avgs[multik] = 0.;
partial_res[multik] = 0;
}
//reset on device
cudaMemcpy(dev_single_run_avgs, &single_run_avgs, MULTISIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_partial_res, & partial_res, MULTISIZE*sizeof(int), cudaMemcpyHostToDevice);
if (HISTORY) printf("# end simulation %i\n", krep+1);
}
// END OUTER REPETITION LOOP
// magn
float l2av = average(avg_of_runs);
float l2stdev = stdev(avg_of_runs);
if (HISTORY) printf("# overall average \n: %f +- %f\n", l2av, l2stdev);
fprintf(resf, "%f ", l2av);
fprintf(resf, "%f\n", l2stdev);
// grid for displaying end-state (of last rep only)
MULTISPIN endgrid[L*L];
cudaMemcpy(endgrid, dev_grid, L*L*sizeof(MULTISPIN), cudaMemcpyDeviceToHost);
if (HISTORY) multidump_first(endgrid);
cudaFree(dev_partial_res);
cudaFree(dev_single_run_avgs);
}
int main() {
// L should be (multiple of THR_NUMBER -2) + 2
assert( ((L-2)% (THR_NUMBER-2) )== 0 );
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
FILE *resf = fopen("results.txt", "w");
fprintf(resf, "# gpu1\n");
fprintf(resf, "# parameters:\n# linear_size: %i\n", L);
fprintf(resf, "# coupling: %f\n# repetitions: %i\n", J, STEPS_REPEAT);
fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", T_MAX_SIM,T_MEASURE_WAIT, T_MEASURE_INTERVAL, SEED);
fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n", AREA, NTOT);
fprintf(resf, "\n");
fprintf(resf, "# columns: temperature - average magnetization - uncertainty \n");
// still used for init_random_grid
srand(SEED);
// curand init
// Allocate memory for RNG states
curandState *d_rngStates = 0;
cudaMalloc((void **)&d_rngStates, THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(curandState));
// Initialise RNG
initRNG<<<BLOCKS, THREADS>>>(d_rngStates, SEED);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("initRNG: ERROR: %s\n", cudaGetErrorString(err));
} else printf("initRNG: no ERROR: %s\n", cudaGetErrorString(err));
// device grid
MULTISPIN * dev_grid;
cudaMalloc(&dev_grid, L*L*sizeof(MULTISPIN));
// original grid on the cpu
MULTISPIN startgrid[L*L];
init_t0_grid(startgrid);
// multidump_a_few(startgrid);
// // temp cycle:
// for( float kt=T_CYCLE_START; kt<T_CYCLE_END; kt+=T_CYCLE_STEP ) {
// const float EXP4 = exp( -(4.*J) / kt);
// const float EXP8 = exp( -(8.*J) / kt);
// fprintf(resf, "%f ", kt);
// if (HISTORY) printf("temperature: %f\n", kt);
// parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf);
// }
// // // // only 1:
// // // // just one:
const float EXP4 = exp( -(4.*J) / SINGLETEMP);
const float EXP8 = exp( -(8.*J) / SINGLETEMP);
fprintf(resf, "%f ", SINGLETEMP);
if (HISTORY) printf("temperature: %f\n", SINGLETEMP);
parall_measure_cycle(startgrid, dev_grid, EXP4, EXP8, d_rngStates, resf);
printf(" ERROR? rng malloc size: %i\n", THR_NUMBER*THR_NUMBER*BLOCK_NUMBER*BLOCK_NUMBER*sizeof(curandState));
printf(" ERROR? shared memory used: %i\n", THR_NUMBER*THR_NUMBER*sizeof(MULTISPIN) + BLOCK_NUMBER*BLOCK_NUMBER*MULTISIZE*sizeof(int));
cudaFree(d_rngStates);
cudaFree(dev_grid);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float total_time = 0;
cudaEventElapsedTime(&total_time, start, stop);
FILE *timef = fopen("time.txt", "w");
long int total_flips = ((long int)(n_temps))* ((long int)((STEPS_REPEAT))) * ((long int)(T_MAX_SIM)) * ((long int)(NTOT));
fprintf(timef, "# gpu1\n");
fprintf(timef, "# total execution time (milliseconds):\n");
fprintf(timef, "%f\n", total_time);
fprintf(timef, "# total spin flips performed:\n");
fprintf(timef, "%li\n", total_flips);
fprintf(timef, "# average spin flips per millisecond:\n");
fprintf(timef, "%Lf\n", ((long double) total_flips )/( (long double) total_time ) );
fclose(timef);
fclose(resf);
return 0;
}
|
2aa037771b1f46c1f404ad0607266480f84bed36.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Provides an interface to the CUFFT package.
Testing examples can be found in ~src/mat/tests
*/
#include <petscdevice_cuda.h>
#include <petsc/private/matimpl.h> /*I "petscmat.h" I*/
typedef struct {
PetscInt ndim;
PetscInt *dim;
hipfftHandle p_forward, p_backward;
hipfftComplex *devArray;
} Mat_CUFFT;
PetscErrorCode MatMult_SeqCUFFT(Mat A, Vec x, Vec y)
{
Mat_CUFFT *cufft = (Mat_CUFFT *)A->data;
hipfftComplex *devArray = cufft->devArray;
PetscInt ndim = cufft->ndim, *dim = cufft->dim;
PetscScalar *x_array, *y_array;
PetscFunctionBegin;
PetscCall(VecGetArray(x, &x_array));
PetscCall(VecGetArray(y, &y_array));
if (!cufft->p_forward) {
/* create a plan, then execute it */
switch (ndim) {
case 1:
PetscCallCUFFT(hipfftPlan1d(&cufft->p_forward, dim[0], HIPFFT_C2C, 1));
break;
case 2:
PetscCallCUFFT(hipfftPlan2d(&cufft->p_forward, dim[0], dim[1], HIPFFT_C2C));
break;
case 3:
PetscCallCUFFT(hipfftPlan3d(&cufft->p_forward, dim[0], dim[1], dim[2], HIPFFT_C2C));
break;
default:
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %" PetscInt_FMT "-dimensional transform", ndim);
}
}
/* transfer to GPU memory */
PetscCallCUDA(hipMemcpy(devArray, x_array, sizeof(hipfftComplex) * dim[ndim], hipMemcpyHostToDevice));
/* execute transform */
PetscCallCUFFT(hipfftExecC2C(cufft->p_forward, devArray, devArray, HIPFFT_FORWARD));
/* transfer from GPU memory */
PetscCallCUDA(hipMemcpy(y_array, devArray, sizeof(hipfftComplex) * dim[ndim], hipMemcpyDeviceToHost));
PetscCall(VecRestoreArray(y, &y_array));
PetscCall(VecRestoreArray(x, &x_array));
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatMultTranspose_SeqCUFFT(Mat A, Vec x, Vec y)
{
Mat_CUFFT *cufft = (Mat_CUFFT *)A->data;
hipfftComplex *devArray = cufft->devArray;
PetscInt ndim = cufft->ndim, *dim = cufft->dim;
PetscScalar *x_array, *y_array;
PetscFunctionBegin;
PetscCall(VecGetArray(x, &x_array));
PetscCall(VecGetArray(y, &y_array));
if (!cufft->p_backward) {
/* create a plan, then execute it */
switch (ndim) {
case 1:
PetscCallCUFFT(hipfftPlan1d(&cufft->p_backward, dim[0], HIPFFT_C2C, 1));
break;
case 2:
PetscCallCUFFT(hipfftPlan2d(&cufft->p_backward, dim[0], dim[1], HIPFFT_C2C));
break;
case 3:
PetscCallCUFFT(hipfftPlan3d(&cufft->p_backward, dim[0], dim[1], dim[2], HIPFFT_C2C));
break;
default:
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %" PetscInt_FMT "-dimensional transform", ndim);
}
}
/* transfer to GPU memory */
PetscCallCUDA(hipMemcpy(devArray, x_array, sizeof(hipfftComplex) * dim[ndim], hipMemcpyHostToDevice));
/* execute transform */
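/* note: the inverse transform is executed on the p_forward plan handle; the p_backward
   plan created above is not used here, so this relies on p_forward already existing */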
PetscCallCUFFT(hipfftExecC2C(cufft->p_forward, devArray, devArray, HIPFFT_BACKWARD));
/* transfer from GPU memory */
PetscCallCUDA(hipMemcpy(y_array, devArray, sizeof(hipfftComplex) * dim[ndim], hipMemcpyDeviceToHost));
PetscCall(VecRestoreArray(y, &y_array));
PetscCall(VecRestoreArray(x, &x_array));
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatDestroy_SeqCUFFT(Mat A)
{
Mat_CUFFT *cufft = (Mat_CUFFT *)A->data;
PetscFunctionBegin;
PetscCall(PetscFree(cufft->dim));
if (cufft->p_forward) PetscCallCUFFT(hipfftDestroy(cufft->p_forward));
if (cufft->p_backward) PetscCallCUFFT(hipfftDestroy(cufft->p_backward));
PetscCallCUDA(hipFree(cufft->devArray));
PetscCall(PetscFree(A->data));
PetscCall(PetscObjectChangeTypeName((PetscObject)A, 0));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
MatCreateSeqCUFFT - Creates a matrix object that provides `MATSEQCUFFT` via the NVIDIA package CuFFT
Collective
Input Parameters:
+ comm - MPI communicator, set to `PETSC_COMM_SELF`
. ndim - the ndim-dimensional transform
- dim - array of size `ndim`, dim[i] contains the vector length in the i-dimension
Output Parameter:
. A - the matrix
Options Database Key:
. -mat_cufft_plannerflags - set CuFFT planner flags
Level: intermediate
.seealso: [](chapter_matrices), `Mat`, `MATSEQCUFFT`
@*/
PetscErrorCode MatCreateSeqCUFFT(MPI_Comm comm, PetscInt ndim, const PetscInt dim[], Mat *A)
{
Mat_CUFFT *cufft;
PetscInt m = 1;
PetscFunctionBegin;
PetscCheck(ndim >= 0, PETSC_COMM_SELF, PETSC_ERR_USER, "ndim %" PetscInt_FMT " must be > 0", ndim);
if (ndim) PetscValidIntPointer(dim, 3);
PetscValidPointer(A, 4);
PetscCall(MatCreate(comm, A));
for (PetscInt d = 0; d < ndim; ++d) {
PetscCheck(dim[d] >= 0, PETSC_COMM_SELF, PETSC_ERR_USER, "dim[%" PetscInt_FMT "]=%" PetscInt_FMT " must be > 0", d, dim[d]);
m *= dim[d];
}
PetscCall(MatSetSizes(*A, m, m, m, m));
PetscCall(PetscObjectChangeTypeName((PetscObject)*A, MATSEQCUFFT));
PetscCall(PetscNew(&cufft));
(*A)->data = (void *)cufft;
PetscCall(PetscMalloc1(ndim + 1, &cufft->dim));
PetscCall(PetscArraycpy(cufft->dim, dim, ndim));
cufft->ndim = ndim;
cufft->p_forward = 0;
cufft->p_backward = 0;
cufft->dim[ndim] = m;
/* GPU memory allocation */
PetscCallCUDA(hipMalloc((void **)&cufft->devArray, sizeof(hipfftComplex) * m));
(*A)->ops->mult = MatMult_SeqCUFFT;
(*A)->ops->multtranspose = MatMultTranspose_SeqCUFFT;
(*A)->assembled = PETSC_TRUE;
(*A)->ops->destroy = MatDestroy_SeqCUFFT;
/* get runtime options ...what options????? */
PetscOptionsBegin(comm, ((PetscObject)(*A))->prefix, "CUFFT Options", "Mat");
PetscOptionsEnd();
PetscFunctionReturn(PETSC_SUCCESS);
}
|
2aa037771b1f46c1f404ad0607266480f84bed36.cu
|
/*
Provides an interface to the CUFFT package.
Testing examples can be found in ~src/mat/tests
*/
#include <petscdevice_cuda.h>
#include <petsc/private/matimpl.h> /*I "petscmat.h" I*/
typedef struct {
PetscInt ndim;
PetscInt *dim;
cufftHandle p_forward, p_backward;
cufftComplex *devArray;
} Mat_CUFFT;
PetscErrorCode MatMult_SeqCUFFT(Mat A, Vec x, Vec y)
{
Mat_CUFFT *cufft = (Mat_CUFFT *)A->data;
cufftComplex *devArray = cufft->devArray;
PetscInt ndim = cufft->ndim, *dim = cufft->dim;
PetscScalar *x_array, *y_array;
PetscFunctionBegin;
PetscCall(VecGetArray(x, &x_array));
PetscCall(VecGetArray(y, &y_array));
if (!cufft->p_forward) {
/* create a plan, then execute it */
switch (ndim) {
case 1:
PetscCallCUFFT(cufftPlan1d(&cufft->p_forward, dim[0], CUFFT_C2C, 1));
break;
case 2:
PetscCallCUFFT(cufftPlan2d(&cufft->p_forward, dim[0], dim[1], CUFFT_C2C));
break;
case 3:
PetscCallCUFFT(cufftPlan3d(&cufft->p_forward, dim[0], dim[1], dim[2], CUFFT_C2C));
break;
default:
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %" PetscInt_FMT "-dimensional transform", ndim);
}
}
/* transfer to GPU memory */
PetscCallCUDA(cudaMemcpy(devArray, x_array, sizeof(cufftComplex) * dim[ndim], cudaMemcpyHostToDevice));
/* execute transform */
PetscCallCUFFT(cufftExecC2C(cufft->p_forward, devArray, devArray, CUFFT_FORWARD));
/* transfer from GPU memory */
PetscCallCUDA(cudaMemcpy(y_array, devArray, sizeof(cufftComplex) * dim[ndim], cudaMemcpyDeviceToHost));
PetscCall(VecRestoreArray(y, &y_array));
PetscCall(VecRestoreArray(x, &x_array));
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatMultTranspose_SeqCUFFT(Mat A, Vec x, Vec y)
{
Mat_CUFFT *cufft = (Mat_CUFFT *)A->data;
cufftComplex *devArray = cufft->devArray;
PetscInt ndim = cufft->ndim, *dim = cufft->dim;
PetscScalar *x_array, *y_array;
PetscFunctionBegin;
PetscCall(VecGetArray(x, &x_array));
PetscCall(VecGetArray(y, &y_array));
if (!cufft->p_backward) {
/* create a plan, then execute it */
switch (ndim) {
case 1:
PetscCallCUFFT(cufftPlan1d(&cufft->p_backward, dim[0], CUFFT_C2C, 1));
break;
case 2:
PetscCallCUFFT(cufftPlan2d(&cufft->p_backward, dim[0], dim[1], CUFFT_C2C));
break;
case 3:
PetscCallCUFFT(cufftPlan3d(&cufft->p_backward, dim[0], dim[1], dim[2], CUFFT_C2C));
break;
default:
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %" PetscInt_FMT "-dimensional transform", ndim);
}
}
/* transfer to GPU memory */
PetscCallCUDA(cudaMemcpy(devArray, x_array, sizeof(cufftComplex) * dim[ndim], cudaMemcpyHostToDevice));
/* execute transform */
PetscCallCUFFT(cufftExecC2C(cufft->p_forward, devArray, devArray, CUFFT_INVERSE));
/* transfer from GPU memory */
PetscCallCUDA(cudaMemcpy(y_array, devArray, sizeof(cufftComplex) * dim[ndim], cudaMemcpyDeviceToHost));
PetscCall(VecRestoreArray(y, &y_array));
PetscCall(VecRestoreArray(x, &x_array));
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatDestroy_SeqCUFFT(Mat A)
{
Mat_CUFFT *cufft = (Mat_CUFFT *)A->data;
PetscFunctionBegin;
PetscCall(PetscFree(cufft->dim));
if (cufft->p_forward) PetscCallCUFFT(cufftDestroy(cufft->p_forward));
if (cufft->p_backward) PetscCallCUFFT(cufftDestroy(cufft->p_backward));
PetscCallCUDA(cudaFree(cufft->devArray));
PetscCall(PetscFree(A->data));
PetscCall(PetscObjectChangeTypeName((PetscObject)A, 0));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@
MatCreateSeqCUFFT - Creates a matrix object that provides `MATSEQCUFFT` via the NVIDIA package CuFFT
Collective
Input Parameters:
+ comm - MPI communicator, set to `PETSC_COMM_SELF`
. ndim - the ndim-dimensional transform
- dim - array of size `ndim`, dim[i] contains the vector length in the i-dimension
Output Parameter:
. A - the matrix
Options Database Key:
. -mat_cufft_plannerflags - set CuFFT planner flags
Level: intermediate
.seealso: [](chapter_matrices), `Mat`, `MATSEQCUFFT`
@*/
PetscErrorCode MatCreateSeqCUFFT(MPI_Comm comm, PetscInt ndim, const PetscInt dim[], Mat *A)
{
Mat_CUFFT *cufft;
PetscInt m = 1;
PetscFunctionBegin;
PetscCheck(ndim >= 0, PETSC_COMM_SELF, PETSC_ERR_USER, "ndim %" PetscInt_FMT " must be >= 0", ndim);
if (ndim) PetscValidIntPointer(dim, 3);
PetscValidPointer(A, 4);
PetscCall(MatCreate(comm, A));
for (PetscInt d = 0; d < ndim; ++d) {
PetscCheck(dim[d] >= 0, PETSC_COMM_SELF, PETSC_ERR_USER, "dim[%" PetscInt_FMT "]=%" PetscInt_FMT " must be > 0", d, dim[d]);
m *= dim[d];
}
PetscCall(MatSetSizes(*A, m, m, m, m));
PetscCall(PetscObjectChangeTypeName((PetscObject)*A, MATSEQCUFFT));
PetscCall(PetscNew(&cufft));
(*A)->data = (void *)cufft;
PetscCall(PetscMalloc1(ndim + 1, &cufft->dim));
PetscCall(PetscArraycpy(cufft->dim, dim, ndim));
cufft->ndim = ndim;
cufft->p_forward = 0;
cufft->p_backward = 0;
cufft->dim[ndim] = m;
/* GPU memory allocation */
PetscCallCUDA(cudaMalloc((void **)&cufft->devArray, sizeof(cufftComplex) * m));
(*A)->ops->mult = MatMult_SeqCUFFT;
(*A)->ops->multtranspose = MatMultTranspose_SeqCUFFT;
(*A)->assembled = PETSC_TRUE;
(*A)->ops->destroy = MatDestroy_SeqCUFFT;
/* get runtime options ...what options????? */
PetscOptionsBegin(comm, ((PetscObject)(*A))->prefix, "CUFFT Options", "Mat");
PetscOptionsEnd();
PetscFunctionReturn(PETSC_SUCCESS);
}
|
4b63fa0656ebb9c167b91c5aa427f64b2ad40f85.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N (33 * 1024)
__global__ void add(int *a, int *b, int * c)
{
// threadIdx.x: index of the current thread. blockIdx.x: index of the current thread block. blockDim.x: number of threads per block.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N)
{
c[tid] = a[tid] + b[tid];
// blockDim.x: number of threads per block. gridDim.x: number of thread blocks in the grid.
tid += blockDim.x * gridDim.x;
}
}
int main()
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc((void**)&dev_a, N*sizeof(int));
hipMalloc((void**)&dev_b, N*sizeof(int));
hipMalloc((void**)&dev_c, N*sizeof(int));
for(int i = 0; i < N; ++i)
{
a[i] = -i;
b[i] = i*i;
}
hipMemcpy(dev_a, a, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_c, c, N*sizeof(int), hipMemcpyHostToDevice);
// add<<<a, b>>>: a is the number of parallel thread blocks the device launches for the kernel, b is the number of threads per block (b must not exceed 512).
hipLaunchKernelGGL(( add), dim3(128), dim3(128), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);
bool success = true;
for(int i = 0; i < N; ++i)
{
if((a[i] + b[i]) != c[i]){
printf("Error: %d + %d != %d\n", a[i], b[i], c[i]);
success = false;
}
}
if(success)
printf("We did it!\n");
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
|
4b63fa0656ebb9c167b91c5aa427f64b2ad40f85.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N (33 * 1024)
__global__ void add(int *a, int *b, int * c)
{
// threadIdx.x: index of the current thread. blockIdx.x: index of the current thread block. blockDim.x: number of threads per block.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N)
{
c[tid] = a[tid] + b[tid];
// blockDim.x: number of threads per block. gridDim.x: number of thread blocks in the grid.
tid += blockDim.x * gridDim.x;
}
}
int main()
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void**)&dev_a, N*sizeof(int));
cudaMalloc((void**)&dev_b, N*sizeof(int));
cudaMalloc((void**)&dev_c, N*sizeof(int));
for(int i = 0; i < N; ++i)
{
a[i] = -i;
b[i] = i*i;
}
cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, c, N*sizeof(int), cudaMemcpyHostToDevice);
// add<<<a, b>>>: a is the number of parallel thread blocks the device launches for the kernel, b is the number of threads per block (b must not exceed 512).
add<<<128, 128>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
bool success = true;
for(int i = 0; i < N; ++i)
{
if((a[i] + b[i]) != c[i]){
printf("Error: %d + %d != %d\n", a[i], b[i], c[i]);
success = false;
}
}
if(success)
printf("We did it!\n");
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
f19f6b2f0c0b17d92d33b62e3bc56f4b86c9531a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_list_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <rmm/device_uvector.hpp>
struct DispatcherTest : public cudf::test::BaseFixture {
};
template <typename T>
struct TypedDispatcherTest : public DispatcherTest {
};
TYPED_TEST_SUITE(TypedDispatcherTest, cudf::test::AllTypes);
namespace {
template <typename Expected>
struct type_tester {
template <typename Dispatched>
bool operator()()
{
return std::is_same_v<Expected, Dispatched>;
}
};
} // namespace
TYPED_TEST(TypedDispatcherTest, TypeToId)
{
EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()},
type_tester<TypeParam>{}));
}
namespace {
struct verify_dispatched_type {
template <typename T>
__host__ __device__ bool operator()(cudf::type_id id)
{
return id == cudf::type_to_id<T>();
}
};
__global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result)
{
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::type_dispatcher(cudf::data_type{id}, verify_dispatched_type{}, id);
}
} // namespace
TYPED_TEST(TypedDispatcherTest, DeviceDispatch)
{
auto result = cudf::detail::make_zeroed_device_uvector_sync<bool>(1, cudf::get_default_stream());
hipLaunchKernelGGL(( dispatch_test_kernel), dim3(1), dim3(1), 0, cudf::get_default_stream().value(),
cudf::type_to_id<TypeParam>(), result.data());
CUDF_CUDA_TRY(hipDeviceSynchronize());
EXPECT_EQ(true, result.front_element(cudf::get_default_stream()));
}
struct IdDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};
INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdDispatcherTest, IdToType)
{
auto t = GetParam();
EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{t}, verify_dispatched_type{}, t));
}
template <typename T>
struct TypedDoubleDispatcherTest : public DispatcherTest {
};
TYPED_TEST_SUITE(TypedDoubleDispatcherTest, cudf::test::AllTypes);
namespace {
template <typename Expected1, typename Expected2>
struct two_type_tester {
template <typename Dispatched1, typename Dispatched2>
bool operator()()
{
return std::is_same_v<Expected1, Dispatched1> && std::is_same_v<Expected2, Dispatched2>;
}
};
} // namespace
TYPED_TEST(TypedDoubleDispatcherTest, TypeToId)
{
EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()},
cudf::data_type{cudf::type_to_id<TypeParam>()},
two_type_tester<TypeParam, TypeParam>{}));
}
namespace {
struct verify_double_dispatched_type {
template <typename T1, typename T2>
__host__ __device__ bool operator()(cudf::type_id id1, cudf::type_id id2)
{
return id1 == cudf::type_to_id<T1>() && id2 == cudf::type_to_id<T2>();
}
};
__global__ void double_dispatch_test_kernel(cudf::type_id id1, cudf::type_id id2, bool* d_result)
{
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::double_type_dispatcher(
cudf::data_type{id1}, cudf::data_type{id2}, verify_double_dispatched_type{}, id1, id2);
}
} // namespace
TYPED_TEST(TypedDoubleDispatcherTest, DeviceDoubleDispatch)
{
auto result = cudf::detail::make_zeroed_device_uvector_sync<bool>(1, cudf::get_default_stream());
hipLaunchKernelGGL(( double_dispatch_test_kernel), dim3(1), dim3(1), 0, cudf::get_default_stream().value(),
cudf::type_to_id<TypeParam>(), cudf::type_to_id<TypeParam>(), result.data());
CUDF_CUDA_TRY(hipDeviceSynchronize());
EXPECT_EQ(true, result.front_element(cudf::get_default_stream()));
}
struct IdDoubleDispatcherTest : public DispatcherTest,
public testing::WithParamInterface<cudf::type_id> {
};
INSTANTIATE_TEST_CASE_P(TestAllIds,
IdDoubleDispatcherTest,
testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdDoubleDispatcherTest, IdToType)
{
// Test double-dispatch of all types using the same type for both dispatches
auto t = GetParam();
EXPECT_TRUE(cudf::double_type_dispatcher(
cudf::data_type{t}, cudf::data_type{t}, verify_double_dispatched_type{}, t, t));
}
struct IdFixedDoubleDispatcherTest : public DispatcherTest,
public testing::WithParamInterface<cudf::type_id> {
};
INSTANTIATE_TEST_CASE_P(TestAllIds,
IdFixedDoubleDispatcherTest,
testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdFixedDoubleDispatcherTest, IdToType)
{
// Test double-dispatch of all types against one fixed type, in each direction
auto t = GetParam();
EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{t},
cudf::data_type{cudf::type_to_id<float>()},
verify_double_dispatched_type{},
t,
cudf::type_to_id<float>()));
EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<float>()},
cudf::data_type{t},
verify_double_dispatched_type{},
cudf::type_to_id<float>(),
t));
}
CUDF_TEST_PROGRAM_MAIN()
|
f19f6b2f0c0b17d92d33b62e3bc56f4b86c9531a.cu
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_list_utilities.hpp>
#include <cudf_test/type_lists.hpp>
#include <rmm/device_uvector.hpp>
struct DispatcherTest : public cudf::test::BaseFixture {
};
template <typename T>
struct TypedDispatcherTest : public DispatcherTest {
};
TYPED_TEST_SUITE(TypedDispatcherTest, cudf::test::AllTypes);
namespace {
template <typename Expected>
struct type_tester {
template <typename Dispatched>
bool operator()()
{
return std::is_same_v<Expected, Dispatched>;
}
};
} // namespace
TYPED_TEST(TypedDispatcherTest, TypeToId)
{
EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()},
type_tester<TypeParam>{}));
}
namespace {
struct verify_dispatched_type {
template <typename T>
__host__ __device__ bool operator()(cudf::type_id id)
{
return id == cudf::type_to_id<T>();
}
};
__global__ void dispatch_test_kernel(cudf::type_id id, bool* d_result)
{
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::type_dispatcher(cudf::data_type{id}, verify_dispatched_type{}, id);
}
} // namespace
TYPED_TEST(TypedDispatcherTest, DeviceDispatch)
{
auto result = cudf::detail::make_zeroed_device_uvector_sync<bool>(1, cudf::get_default_stream());
dispatch_test_kernel<<<1, 1, 0, cudf::get_default_stream().value()>>>(
cudf::type_to_id<TypeParam>(), result.data());
CUDF_CUDA_TRY(cudaDeviceSynchronize());
EXPECT_EQ(true, result.front_element(cudf::get_default_stream()));
}
struct IdDispatcherTest : public DispatcherTest, public testing::WithParamInterface<cudf::type_id> {
};
INSTANTIATE_TEST_CASE_P(TestAllIds, IdDispatcherTest, testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdDispatcherTest, IdToType)
{
auto t = GetParam();
EXPECT_TRUE(cudf::type_dispatcher(cudf::data_type{t}, verify_dispatched_type{}, t));
}
template <typename T>
struct TypedDoubleDispatcherTest : public DispatcherTest {
};
TYPED_TEST_SUITE(TypedDoubleDispatcherTest, cudf::test::AllTypes);
namespace {
template <typename Expected1, typename Expected2>
struct two_type_tester {
template <typename Dispatched1, typename Dispatched2>
bool operator()()
{
return std::is_same_v<Expected1, Dispatched1> && std::is_same_v<Expected2, Dispatched2>;
}
};
} // namespace
TYPED_TEST(TypedDoubleDispatcherTest, TypeToId)
{
EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<TypeParam>()},
cudf::data_type{cudf::type_to_id<TypeParam>()},
two_type_tester<TypeParam, TypeParam>{}));
}
namespace {
struct verify_double_dispatched_type {
template <typename T1, typename T2>
__host__ __device__ bool operator()(cudf::type_id id1, cudf::type_id id2)
{
return id1 == cudf::type_to_id<T1>() && id2 == cudf::type_to_id<T2>();
}
};
__global__ void double_dispatch_test_kernel(cudf::type_id id1, cudf::type_id id2, bool* d_result)
{
if (0 == threadIdx.x + blockIdx.x * blockDim.x)
*d_result = cudf::double_type_dispatcher(
cudf::data_type{id1}, cudf::data_type{id2}, verify_double_dispatched_type{}, id1, id2);
}
} // namespace
TYPED_TEST(TypedDoubleDispatcherTest, DeviceDoubleDispatch)
{
auto result = cudf::detail::make_zeroed_device_uvector_sync<bool>(1, cudf::get_default_stream());
double_dispatch_test_kernel<<<1, 1, 0, cudf::get_default_stream().value()>>>(
cudf::type_to_id<TypeParam>(), cudf::type_to_id<TypeParam>(), result.data());
CUDF_CUDA_TRY(cudaDeviceSynchronize());
EXPECT_EQ(true, result.front_element(cudf::get_default_stream()));
}
struct IdDoubleDispatcherTest : public DispatcherTest,
public testing::WithParamInterface<cudf::type_id> {
};
INSTANTIATE_TEST_CASE_P(TestAllIds,
IdDoubleDispatcherTest,
testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdDoubleDispatcherTest, IdToType)
{
// Test double-dispatch of all types using the same type for both dispatches
auto t = GetParam();
EXPECT_TRUE(cudf::double_type_dispatcher(
cudf::data_type{t}, cudf::data_type{t}, verify_double_dispatched_type{}, t, t));
}
struct IdFixedDoubleDispatcherTest : public DispatcherTest,
public testing::WithParamInterface<cudf::type_id> {
};
INSTANTIATE_TEST_CASE_P(TestAllIds,
IdFixedDoubleDispatcherTest,
testing::ValuesIn(cudf::test::all_type_ids));
TEST_P(IdFixedDoubleDispatcherTest, IdToType)
{
// Test double-dispatch of all types against one fixed type, in each direction
auto t = GetParam();
EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{t},
cudf::data_type{cudf::type_to_id<float>()},
verify_double_dispatched_type{},
t,
cudf::type_to_id<float>()));
EXPECT_TRUE(cudf::double_type_dispatcher(cudf::data_type{cudf::type_to_id<float>()},
cudf::data_type{t},
verify_double_dispatched_type{},
cudf::type_to_id<float>(),
t));
}
CUDF_TEST_PROGRAM_MAIN()
|
6d09d5708d7943b6598135a47a318430507ae7ea.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_deconv_layer.hpp"
namespace caffe {
__global__ void sync_deconv_groups() {}
template <typename Dtype>
void CuDNNDeconvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) {
const Dtype *weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype *bottom_data = bottom[i]->gpu_data();
Dtype *top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[g], cudnn::dataType<Dtype>::one, filter_desc_,
weight + this->weight_offset_ * g, bottom_descs_[i],
bottom_data + bottom_offset_ * g, conv_descs_[i], bwd_data_algo_[i],
workspace[g], workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero, top_descs_[i],
top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype *bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one, top_descs_[i],
top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_deconv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CuDNNDeconvolutionLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
const Dtype *weight = NULL;
Dtype *weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype *bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype *top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(
handle_[0 * this->group_ + g], cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one, bias_desc_,
bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype *bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1 * this->group_ + g], cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g, bottom_descs_[i],
bottom_data + bottom_offset_ * g, conv_descs_[i],
bwd_filter_algo_[i], workspace[1 * this->group_ + g],
workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype *bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionForward(
handle_[2 * this->group_ + g], cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g, filter_desc_,
weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i],
workspace[2 * this->group_ + g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero, bottom_descs_[i],
bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_deconv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNDeconvolutionLayer);
} // namespace caffe
#endif
|
6d09d5708d7943b6598135a47a318430507ae7ea.cu
|
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_deconv_layer.hpp"
namespace caffe {
__global__ void sync_deconv_groups() {}
template <typename Dtype>
void CuDNNDeconvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) {
const Dtype *weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype *bottom_data = bottom[i]->gpu_data();
Dtype *top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[g], cudnn::dataType<Dtype>::one, filter_desc_,
weight + this->weight_offset_ * g, bottom_descs_[i],
bottom_data + bottom_offset_ * g, conv_descs_[i], bwd_data_algo_[i],
workspace[g], workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero, top_descs_[i],
top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype *bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one, top_descs_[i],
top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_deconv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNDeconvolutionLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
const vector<Blob<Dtype> *> &bottom) {
const Dtype *weight = NULL;
Dtype *weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype *bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype *top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(
handle_[0 * this->group_ + g], cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one, bias_desc_,
bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype *bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1 * this->group_ + g], cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g, bottom_descs_[i],
bottom_data + bottom_offset_ * g, conv_descs_[i],
bwd_filter_algo_[i], workspace[1 * this->group_ + g],
workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype *bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionForward(
handle_[2 * this->group_ + g], cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g, filter_desc_,
weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i],
workspace[2 * this->group_ + g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero, bottom_descs_[i],
bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_deconv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNDeconvolutionLayer);
} // namespace caffe
#endif
|
7c40cbe4b0b15b4455e9530351c785552513b0e1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cuda/array_abstraction.hpp>
#include <common/cu_error.hpp>
#include <cuda/noise.hpp>
#include <ctime>
typedef float Float;
typedef Array::CPUArray<Float> CPUArray;
typedef Array::GPUArray<Float> GPUArray;
using Array::AsCmpl;
int main(int argc, char* argv[]){
if(argc != 5){
std::cerr << "usage: " << argv[0] << " <n0> <n1> <n2> <outfile.npy> # n0 >> n1 >> n2" << std::endl;
exit(EXIT_FAILURE);
}
CUERR(hipSetDevice(0));
int3 shape = {atoi(argv[1]), atoi(argv[2]), atoi(argv[3])};
std::string outfile(argv[4]);
CPUArray host_array(shape);
host_array.set_to(0);
GPUArray dev_array(host_array);
NoiseHostApi<Float> noise(clock() % 1234);
noise.fill_kspace(dev_array);
dev_array >> host_array;
host_array.save<AsCmpl>(outfile);
return 0;
}
|
7c40cbe4b0b15b4455e9530351c785552513b0e1.cu
|
#include <iostream>
#include <cuda/array_abstraction.hpp>
#include <common/cu_error.hpp>
#include <cuda/noise.hpp>
#include <ctime>
typedef float Float;
typedef Array::CPUArray<Float> CPUArray;
typedef Array::GPUArray<Float> GPUArray;
using Array::AsCmpl;
int main(int argc, char* argv[]){
if(argc != 5){
std::cerr << "usage: " << argv[0] << " <n0> <n1> <n2> <outfile.npy> # n0 >> n1 >> n2" << std::endl;
exit(EXIT_FAILURE);
}
CUERR(cudaSetDevice(0));
int3 shape = {atoi(argv[1]), atoi(argv[2]), atoi(argv[3])};
std::string outfile(argv[4]);
CPUArray host_array(shape);
host_array.set_to(0);
GPUArray dev_array(host_array);
NoiseHostApi<Float> noise(clock() % 1234);
noise.fill_kspace(dev_array);
dev_array >> host_array;
host_array.save<AsCmpl>(outfile);
return 0;
}
|
billionga.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include "config.h"
#include "util.h"
#include "cuda-util.h"
#include "mtgp-1.1/mtgp32-cuda.h"
#include "billionga.h"
//#define SHOW_PROB_VECTOR_BITS 16
#define SHOW_PROB_VECTOR_BITS 128
#define SHOW_SAMPLE_BITS 128
#define SAMPLE_PROB_VECTOR_BLOCKS 64
#define SAMPLE_PROB_VECTOR_THREADS 256
#define SAMPLE_PROB_VECTOR_SHMEM (SAMPLE_PROB_VECTOR_THREADS >> 5)
#define UPDATE_PROB_VECTOR_BLOCKS 128
#define UPDATE_PROB_VECTOR_THREADS 512
#define UPDATE_PROB_VECTOR_SHMEM (UPDATE_PROB_VECTOR_THREADS >> 5)
// Step 1 of the algorithm.
void bga_initialization(struct bga_state *state, long number_of_bits, int number_of_prob_vectors, int number_of_samples) {
state->number_of_bits = number_of_bits;
state->number_of_samples = number_of_samples;
state->number_of_prob_vectors = number_of_prob_vectors;
state->population_size = POPULATION_SIZE;
state->max_prob_sum = (number_of_bits * POPULATION_SIZE);
//#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Initializing Billion GA ====================\n");
fprintf(stdout, "[INFO] Problem size : %ld\n", number_of_bits);
fprintf(stdout, "[INFO] Population size : %d\n", state->population_size);
fprintf(stdout, "[INFO] Num. of vectors : %d\n", state->number_of_prob_vectors);
fprintf(stdout, "[INFO] Delta : %d\n", DELTA);
fprintf(stdout, "[INFO] Prob. min. value: %d\n", MIN_PVALUE);
fprintf(stdout, "[INFO] Prob. max. value: %d\n", MAX_PVALUE);
//#endif
// === Request memory ==============================================================
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Solicitando memoria =======================\n");
#endif
// === Request memory for the probability vector =========================================
state->prob_vector_bit_count = state->number_of_bits / number_of_prob_vectors;
int bits_left = state->number_of_bits % number_of_prob_vectors;
if (bits_left == 0) {
state->last_prob_vector_bit_count = state->prob_vector_bit_count;
} else {
state->last_prob_vector_bit_count = bits_left;
}
//#ifdef INFO
fprintf(stdout, "[INFO] Requesting a size %d prob_vector_size CPU memory\n", state->number_of_prob_vectors);
//#endif
size_t prob_vectors_acc_prob_array_size = sizeof(long) * state->number_of_prob_vectors;
state->prob_vectors_acc_prob = (long*)malloc(prob_vectors_acc_prob_array_size);
if (!state->prob_vectors_acc_prob) {
fprintf(stderr, "[ERROR] Requesting CPU memory for the prob_vectors_acc_prob\n");
exit(EXIT_FAILURE);
}
size_t prob_vector_array_size = sizeof(int*) * state->number_of_prob_vectors;
state->gpu_prob_vectors = (int**)malloc(prob_vector_array_size);
if (!state->gpu_prob_vectors) {
fprintf(stderr, "[ERROR] Requesting CPU memory for the prob_vector\n");
exit(EXIT_FAILURE);
}
// === Request memory for the samples ====================================================
//#ifdef INFO
fprintf(stdout, "[INFO] Requesting a size %d samples CPU memory\n", state->number_of_samples);
//#endif
size_t samples_vector_fitness_array_size = sizeof(int*) * state->number_of_samples;
state->samples_vector_fitness = (int**)malloc(samples_vector_fitness_array_size);
if (!state->samples_vector_fitness) {
fprintf(stderr, "[ERROR] Requesting samples_vector_fitness CPU memory\n");
exit(EXIT_FAILURE);
}
size_t samples_array_size = sizeof(int*) * state->number_of_samples;
state->gpu_samples = (int***)malloc(samples_array_size);
if (!state->gpu_samples) {
fprintf(stderr, "[ERROR] Requesting samples_fitness CPU memory\n");
exit(EXIT_FAILURE);
}
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
//#ifdef INFO
fprintf(stdout, "[INFO] > Requesting CPU memory for sample %d vectors array\n", sample_number);
//#endif
size_t samples_vector_array_size = sizeof(int*) * state->number_of_prob_vectors;
state->gpu_samples[sample_number] = (int**)malloc(samples_vector_array_size);
if (!state->gpu_samples) {
fprintf(stderr, "[ERROR] > Requesting CPU memory for sample_vector_array[%d]\n", sample_number);
exit(EXIT_FAILURE);
}
size_t samples_vector_fitness_size = sizeof(int) * state->number_of_prob_vectors;
state->samples_vector_fitness[sample_number] = (int*)malloc(samples_vector_fitness_size);
if (!state->samples_vector_fitness[sample_number]) {
fprintf(stderr, "[ERROR] Requesting samples_fitness CPU memory\n");
exit(EXIT_FAILURE);
}
}
size_t samples_fitness_size = sizeof(int*) * state->number_of_samples;
//#ifdef INFO
fprintf(stdout, "[INFO] Requesting samples_fitness CPU memory (size: %i)\n", state->number_of_samples);
//#endif
state->samples_fitness = (int*)malloc(samples_fitness_size);
if (!state->samples_fitness) {
fprintf(stderr, "[ERROR] > Requesting CPU memory for samples_fitness_size\n");
exit(EXIT_FAILURE);
}
// === Auxiliary memory ==================================================================
size_t gpu_int32_vector_sum_size = sizeof(long*) * state->number_of_prob_vectors;
state->gpu_int32_vector_sum = (long**)malloc(gpu_int32_vector_sum_size);
size_t cpu_int32_vector_sum_size = sizeof(long*) * state->number_of_prob_vectors;
state->cpu_int32_vector_sum = (long**)malloc(cpu_int32_vector_sum_size);
size_t gpu_bit_vector_sum_size = sizeof(int*) * state->number_of_prob_vectors;
state->gpu_bit_vector_sum = (int**)malloc(gpu_bit_vector_sum_size);
size_t cpu_bit_vector_sum_size = sizeof(int*) * state->number_of_prob_vectors;
state->cpu_bit_vector_sum = (int**)malloc(cpu_bit_vector_sum_size);
}
void bga_initialize_thread(struct bga_state *state, int prob_vector_number) {
hipError_t error;
#if defined(TIMMING)
float gputime;
hipEvent_t start;
hipEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
//#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Solicitando memoria para el thread %d =====\n", prob_vector_number);
//#endif
// === Request memory for the probability vector =========================================
{
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
size_t prob_vector_size = sizeof(int) * current_prob_vector_number_of_bits;
//#ifdef INFO
fprintf(stdout, "[INFO] > Requesting %d bits GPU memory for prob_vector %d (size: %i / %lu Mb)\n",
current_prob_vector_number_of_bits, prob_vector_number, current_prob_vector_number_of_bits,
prob_vector_size >> 20);
//#endif
error = hipMalloc((void**)&(state->gpu_prob_vectors[prob_vector_number]), prob_vector_size);
if (error != hipSuccess) {
fprintf(stderr, "[ERROR] Requesting GPU memory for prob_vector_number[%d]\n", prob_vector_number);
exit(EXIT_FAILURE);
}
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "TIME] Processing time: %f (ms)\n", gputime);
ccudaEventRecord(start, 0);
#endif
// === Request memory for the samples ====================================================
//#ifdef INFO
fprintf(stdout, "[INFO] Requesting a size %d samples CPU memory\n", state->number_of_samples);
//#endif
{
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
size_t sample_vector_size = sizeof(int) * (current_prob_vector_number_of_bits >> 5);
int right_size = current_prob_vector_number_of_bits & ((1<<5)-1);
assert(right_size == 0);
//#ifdef INFO
fprintf(stdout, "[INFO] > Requesting sample %d GPU memory for vector %d (size: %i / %lu Mb)\n",
sample_number, prob_vector_number, current_prob_vector_number_of_bits >> 5, sample_vector_size >> 20);
//#endif
error = hipMalloc((void**)&(state->gpu_samples[sample_number][prob_vector_number]), sample_vector_size);
if (error != hipSuccess) {
fprintf(stderr, "[ERROR] > Requesting GPU memory for sample_number[%d]\n", sample_number);
exit(EXIT_FAILURE);
}
}
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "TIME] Processing time: %f (ms)\n", gputime);
ccudaEventRecord(start, 0);
#endif
// === Initialize the probability vector =================================================
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Inicializando memoria =======================\n");
#endif
#if defined(TIMMING)
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
{
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
#ifdef INFO
fprintf(stdout, "[INFO] Inicializando GPU memory of prob_vector %d (%d bits)\n",
prob_vector_number, current_prob_vector_number_of_bits);
#endif
vector_set_int(state->gpu_prob_vectors[prob_vector_number],
current_prob_vector_number_of_bits, INIT_PROB_VECTOR_VALUE);
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
// === Auxiliary memory ==================================================================
vector_sum_int_alloc(&(state->gpu_int32_vector_sum[prob_vector_number]),
&(state->cpu_int32_vector_sum[prob_vector_number]));
vector_sum_bit_alloc(&(state->gpu_bit_vector_sum[prob_vector_number]),
&(state->cpu_bit_vector_sum[prob_vector_number]));
}
long bga_get_part_accumulated_prob(struct bga_state *state, int prob_vector_number) {
vector_sum_int_init(state->gpu_int32_vector_sum[prob_vector_number]);
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
vector_sum_int(state->gpu_prob_vectors[prob_vector_number],
state->gpu_int32_vector_sum[prob_vector_number],
current_prob_vector_number_of_bits);
state->prob_vectors_acc_prob[prob_vector_number] = vector_sum_int_get(
state->gpu_int32_vector_sum[prob_vector_number],
state->cpu_int32_vector_sum[prob_vector_number]);
return state->prob_vectors_acc_prob[prob_vector_number];
}
long bga_get_part_stats_prob(struct bga_state *state, int prob_vector_number, int op, int value) {
vector_sp_int_init(state->gpu_int32_vector_sum[prob_vector_number]);
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
vector_sp_int(state->gpu_prob_vectors[prob_vector_number],
state->gpu_int32_vector_sum[prob_vector_number],
current_prob_vector_number_of_bits, op, value);
long result;
result = vector_sp_int_get(
state->gpu_int32_vector_sum[prob_vector_number],
state->cpu_int32_vector_sum[prob_vector_number]);
return result;
}
long bga_get_full_accumulated_prob(struct bga_state *state) {
long result = 0;
for (int prob_vector_number = 0; prob_vector_number < state->number_of_prob_vectors; prob_vector_number++) {
result += state->prob_vectors_acc_prob[prob_vector_number];
}
return result;
}
void bga_show_prob_vector_state(struct bga_state *state) {
#if defined(TIMMING)
float gputime;
hipEvent_t start;
hipEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
fprintf(stdout, "[INFO] === Probability vector status =======================\n");
vector_sum_int_init(state->gpu_int32_vector_sum[0]);
vector_sum_int_show(state->gpu_int32_vector_sum[0], state->cpu_int32_vector_sum[0]);
fprintf(stdout, "[INFO] Prob. vector sample:");
for (int prob_vector_number = 0; prob_vector_number < state->number_of_prob_vectors; prob_vector_number++) {
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
if (prob_vector_number == 0) {
int probs_to_show_count = SHOW_PROB_VECTOR_BITS;
if (current_prob_vector_number_of_bits < SHOW_PROB_VECTOR_BITS)
probs_to_show_count = state->prob_vector_bit_count;
int *probs_to_show = (int*)malloc(sizeof(int) * probs_to_show_count);
ccudaMemcpy(probs_to_show, state->gpu_prob_vectors[prob_vector_number],
sizeof(uint32_t) * probs_to_show_count, hipMemcpyDeviceToHost);
long sum = 0;
for (int i = 0; i < probs_to_show_count; i++) {
fprintf(stdout, " %d (%.4f)", probs_to_show[i], (float)probs_to_show[i] / (float)state->population_size);
sum += probs_to_show[i];
}
fprintf(stdout, "... Total [%d]: %ld ( %f )\n", probs_to_show_count, sum, (float)sum / (float)(probs_to_show_count * state->population_size));
free(probs_to_show);
}
vector_sum_int(state->gpu_prob_vectors[prob_vector_number],
state->gpu_int32_vector_sum[0], current_prob_vector_number_of_bits);
}
long accumulated_probability = 0;
accumulated_probability = vector_sum_int_get(
state->gpu_int32_vector_sum[0],
state->cpu_int32_vector_sum[0]);
fprintf(stdout, "[INFO] Prob. vector accumulated probability (%ld / %ld): %f\n",
accumulated_probability, state->max_prob_sum,
(float)accumulated_probability / (float)state->max_prob_sum);
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
void bga_compute_sample_part_fitness(struct bga_state *state, int prob_vector_number) {
#if defined(TIMMING)
float gputime;
hipEvent_t start;
hipEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
#if defined(INFO)
fprintf(stdout, "[INFO] === Sample vectors fitness =============================\n");
#endif
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
vector_sum_bit_init(state->gpu_bit_vector_sum[prob_vector_number]);
#if defined(DEBUG)
fprintf(stdout, "[INFO] Computing sample vector %d fitness: ", sample_number);
#endif
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
vector_sum_bit(state->gpu_samples[sample_number][prob_vector_number],
state->gpu_bit_vector_sum[prob_vector_number], current_prob_vector_number_of_bits);
state->samples_vector_fitness[sample_number][prob_vector_number] = vector_sum_bit_get(
state->gpu_bit_vector_sum[prob_vector_number],
state->cpu_bit_vector_sum[prob_vector_number]);
#if defined(DEBUG)
fprintf(stdout, "%d\n", state->samples_vector_fitness[sample_number][prob_vector_number]);
#endif
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
void bga_compute_sample_full_fitness(struct bga_state *state) {
int result;
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
result = 0;
for (int prob_vector_number = 0; prob_vector_number < state->number_of_prob_vectors; prob_vector_number++) {
result += state->samples_vector_fitness[sample_number][prob_vector_number];
}
state->samples_fitness[sample_number] = result;
}
}
void bga_show_samples(struct bga_state *state) {
#if defined(TIMMING)
float gputime;
hipEvent_t start;
hipEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
fprintf(stdout, "[INFO] === Sample vectors =====================================\n");
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
fprintf(stdout, "[INFO] Sample vector sample (%d):", sample_number);
for (int prob_vector_number = 0; prob_vector_number < state->number_of_prob_vectors; prob_vector_number++) {
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
if (prob_vector_number == 0) {
int bits_to_show_count = SHOW_SAMPLE_BITS;
if (current_prob_vector_number_of_bits < SHOW_SAMPLE_BITS)
bits_to_show_count = state->prob_vector_bit_count;
int bytes_to_show_count = bits_to_show_count >> 5;
int *bytes_to_show = (int*)malloc(sizeof(int) * bytes_to_show_count);
ccudaMemcpy(bytes_to_show, state->gpu_samples[sample_number][prob_vector_number],
sizeof(uint32_t) * bytes_to_show_count, hipMemcpyDeviceToHost);
for (int i = 0; i < bytes_to_show_count; i++) {
fprintf(stdout, " %s", int_to_binary(bytes_to_show[i]));
}
free(bytes_to_show);
fprintf(stdout, "...\n");
}
fprintf(stdout, "[INFO] Prob. vector %d, sample %d >> fitness: %d\n", prob_vector_number, sample_number, state->samples_vector_fitness[sample_number][prob_vector_number]);
}
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
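// Samples the probability vector: each thread sets its bit to 1 when
// gpu_prob_vector[pos] + population_size >= prng_vector[pos] * population_size,
// then one thread per 32 positions ORs the shared-memory bits into a packed int of gpu_sample.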
__global__ void kern_sample_prob_vector(int *gpu_prob_vector, int prob_vector_size,
int prob_vector_starting_pos, float *prng_vector, int prng_vector_size, int *gpu_sample,
int population_size) {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int samples_per_loop = gridDim.x * blockDim.x;
int max_samples_doable = prob_vector_size - prob_vector_starting_pos;
if (max_samples_doable > prng_vector_size) max_samples_doable = prng_vector_size;
int loops_count = max_samples_doable / samples_per_loop;
if (max_samples_doable % samples_per_loop > 0) loops_count++;
__shared__ int current_block_sample[SAMPLE_PROB_VECTOR_THREADS];
int prob_vector_position;
int prng_position;
int block_starting_pos;
for (int loop = 0; loop < loops_count; loop++) {
// 0 by default.
current_block_sample[tid] = 0;
// Each loop generates blockDim.x bits and stores them in the __shared__ memory array.
block_starting_pos = (samples_per_loop * loop) + (bid * blockDim.x);
prng_position = block_starting_pos + tid;
prob_vector_position = prob_vector_starting_pos + prng_position;
if (prng_position < max_samples_doable) {
if ((gpu_prob_vector[prob_vector_position] + population_size) >= (prng_vector[prng_position] * population_size)) {
// 1
current_block_sample[tid] = 1 << (tid & ((1 << 5)-1));
}
}
__syncthreads();
if ((tid << 5) < SAMPLE_PROB_VECTOR_THREADS) {
int aux = current_block_sample[tid << 5];
#pragma unroll
for (int i = 1; i < 32; i++) {
aux = aux | current_block_sample[(tid << 5)+i];
}
int sample_pos = prob_vector_starting_pos + block_starting_pos;
if ((sample_pos + (tid << 5)) < prob_vector_size) {
gpu_sample[(sample_pos >> 5) + tid] = aux;
}
}
__syncthreads();
}
}
// Step 2 of the algorithm.
void bga_model_sampling_mt(struct bga_state *state, mtgp32_status *mt_status, int prob_vector_number) {
#if defined(DEBUG)
fprintf(stdout, "[INFO] === Sampling the model =======================\n");
#endif
#if defined(TIMMING)
float gputime;
hipEvent_t start;
hipEvent_t end;
hipEvent_t start_inner;
hipEvent_t end_inner;
#endif
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
#if defined(DEBUG)
fprintf(stdout, "[INFO] > Sample %d ", sample_number);
#endif
#if defined(TIMMING)
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
ccudaEventCreate(&start_inner);
ccudaEventCreate(&end_inner);
#endif
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
int total_loops;
total_loops = current_prob_vector_number_of_bits / mt_status->numbers_per_gen;
if (current_prob_vector_number_of_bits % mt_status->numbers_per_gen > 0) total_loops++;
int prob_vector_starting_pos;
for (int loop = 0; loop < total_loops; loop++) {
prob_vector_starting_pos = mt_status->numbers_per_gen * loop;
// Generate random numbers.
#if defined(TIMMING)
fprintf(stdout, "[TIME] Generate mtgp32_generate_float\n", gputime);
ccudaEventRecord(start_inner, 0);
#endif
mtgp32_generate_float(mt_status);
#if defined(TIMMING)
ccudaEventRecord(end_inner, 0);
ccudaEventSynchronize(end_inner);
ccudaEventElapsedTime(&gputime, start_inner, end_inner);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
#endif
#if defined(DEBUG)
fprintf(stdout, ".");
#endif
#if defined(TIMMING)
fprintf(stdout, "[TIME] Generate kern_sample_prob_vector\n", gputime);
ccudaEventRecord(start_inner, 0);
#endif
// Sample the prob. vector with the generated random numbers.
hipLaunchKernelGGL(( kern_sample_prob_vector), dim3(SAMPLE_PROB_VECTOR_BLOCKS), dim3(SAMPLE_PROB_VECTOR_THREADS), 0, 0,
state->gpu_prob_vectors[prob_vector_number], current_prob_vector_number_of_bits,
prob_vector_starting_pos, (float*)mt_status->d_data, mt_status->numbers_per_gen,
state->gpu_samples[sample_number][prob_vector_number], state->population_size);
#if defined(TIMMING)
ccudaEventRecord(end_inner, 0);
ccudaEventSynchronize(end_inner);
ccudaEventElapsedTime(&gputime, start_inner, end_inner);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
#endif
}
#if defined(DEBUG)
fprintf(stdout, "(%d)\n", total_loops);
#endif
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Total processing time: %f (ms)\n", gputime);
#endif
}
//bga_show_samples(state);
#if defined(TIMMING)
ccudaEventDestroy(start_inner);
ccudaEventDestroy(end_inner);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
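// CPU reference of the model update, for debugging: it recomputes the accumulated
// probability before and after applying the best/worst-sample deltas and prints both
// (the call site inside bga_model_update below is commented out).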
void cpu_model_update(int *gpu_prob_vector, int prob_vector_size,
int *gpu_best_sample, int *gpu_worst_sample) {
int *prob_vector = (int*)malloc(sizeof(int) * prob_vector_size);
ccudaMemcpy(prob_vector, gpu_prob_vector, sizeof(uint32_t) * prob_vector_size,
hipMemcpyDeviceToHost);
long current_acc_prob = 0;
for (int i = 0; i < prob_vector_size; i++) {
current_acc_prob += prob_vector[i];
}
int sample_size = prob_vector_size >> 5;
int *best_sample = (int*)malloc(sizeof(int) * sample_size);
int *worst_sample = (int*)malloc(sizeof(int) * sample_size);
ccudaMemcpy(best_sample, gpu_best_sample,
sizeof(uint32_t) * sample_size, hipMemcpyDeviceToHost);
ccudaMemcpy(worst_sample, gpu_worst_sample,
sizeof(uint32_t) * sample_size, hipMemcpyDeviceToHost);
int best_sample_current_bit_value;
int worst_sample_current_bit_value;
int delta;
for (int i = 0; i < prob_vector_size; i++) {
int bit_pos = i & ((1 << 5)-1);
int int_pos = i >> 5;
best_sample_current_bit_value = (best_sample[int_pos] & (1 << bit_pos)) >> bit_pos;
worst_sample_current_bit_value = (worst_sample[int_pos] & (1 << bit_pos)) >> bit_pos;
delta = best_sample_current_bit_value - worst_sample_current_bit_value;
prob_vector[i] += delta;
}
long new_acc_prob = 0;
for (int i = 0; i < prob_vector_size; i++) {
new_acc_prob += prob_vector[i];
}
fprintf(stdout, "[DEBUG][CPU] Acc. prob. => Current %ld , New %ld (delta: %ld )\n",
current_acc_prob, new_acc_prob, new_acc_prob - current_acc_prob);
free(prob_vector);
free(best_sample);
free(worst_sample);
}
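// GPU model update: for each probability-vector entry, adds DELTA when only the best
// sample has the bit set, subtracts DELTA when only the worst sample has it, and
// clamps the result to [MIN_PVALUE, MAX_PVALUE].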
__global__ void kern_model_update(int *gpu_prob_vector, int prob_vector_size,
int *best_sample, int *worst_sample, int max_value) {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
__shared__ int best_sample_part[UPDATE_PROB_VECTOR_SHMEM];
__shared__ int worst_sample_part[UPDATE_PROB_VECTOR_SHMEM];
int loop_size = gridDim.x * blockDim.x;
int loop_count = prob_vector_size / loop_size;
if (prob_vector_size % loop_size > 0) loop_count++;
int prob_vector_position;
int block_starting_pos;
const int tid_int = tid >> 5;
const int tid_bit = tid & ((1 << 5)-1);
int best_sample_current_bit_value;
int worst_sample_current_bit_value;
int delta;
for (int loop = 0; loop < loop_count; loop++) {
block_starting_pos = (loop_size * loop) + (bid * blockDim.x);
if (tid < UPDATE_PROB_VECTOR_SHMEM) {
if ((block_starting_pos + (tid << 5)) < prob_vector_size) {
best_sample_part[tid] = best_sample[(block_starting_pos >> 5) + tid];
worst_sample_part[tid] = worst_sample[(block_starting_pos >> 5) + tid];
}
}
__syncthreads();
prob_vector_position = block_starting_pos + tid;
if (prob_vector_position < prob_vector_size) {
best_sample_current_bit_value = (best_sample_part[tid_int] & (1 << tid_bit)) >> tid_bit;
worst_sample_current_bit_value = (worst_sample_part[tid_int] & (1 << tid_bit)) >> tid_bit;
delta = (best_sample_current_bit_value - worst_sample_current_bit_value) * DELTA;
int aux = gpu_prob_vector[prob_vector_position];
aux = aux + delta;
if (aux < MIN_PVALUE) {
aux = MIN_PVALUE;
} else if (aux > MAX_PVALUE) {
aux = MAX_PVALUE;
}
gpu_prob_vector[prob_vector_position] = aux;
}
}
}
// Steps 4 and 5 of the algorithm.
void bga_model_update(struct bga_state *state, int prob_vector_number) {
#if defined(DEBUG)
fprintf(stdout, "[INFO] === Updating the model =======================\n");
#endif
#if defined(TIMMING)
float gputime;
hipEvent_t start;
hipEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
assert(state->number_of_samples == 2);
int best_sample_index, worst_sample_index;
long fitness_sample_a, fitness_sample_b;
#if defined(FULL_FITNESS_UPDATE)
fitness_sample_a = state->samples_fitness[0];
fitness_sample_b = state->samples_fitness[1];
#endif
#if defined(PARTIAL_FITNESS_UPDATE)
fitness_sample_a = state->samples_vector_fitness[0][prob_vector_number];
fitness_sample_b = state->samples_vector_fitness[1][prob_vector_number];
#endif
#if defined(DEBUG)
fprintf(stdout, "[INFO] prob. vector[%d] fitness_sample_a=%ld fitness_sample_a=%ld\n",
prob_vector_number, fitness_sample_a, fitness_sample_b);
#endif
if (fitness_sample_a >= fitness_sample_b) {
best_sample_index = 0;
worst_sample_index = 1;
}
else {
best_sample_index = 1;
worst_sample_index = 0;
}
#if defined(DEBUG)
fprintf(stdout, "[INFO] prob. vector[%d] best=%d, worst=%d\n",
prob_vector_number, best_sample_index, worst_sample_index);
#endif
int *best_sample;
int *worst_sample;
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
best_sample = state->gpu_samples[best_sample_index][prob_vector_number];
worst_sample = state->gpu_samples[worst_sample_index][prob_vector_number];
/*cpu_model_update(state->gpu_prob_vectors[prob_vector_number],
current_prob_vector_number_of_bits,
best_sample, worst_sample);
*/
hipLaunchKernelGGL(( kern_model_update) , dim3(UPDATE_PROB_VECTOR_BLOCKS), dim3(UPDATE_PROB_VECTOR_THREADS) , 0, 0,
state->gpu_prob_vectors[prob_vector_number], current_prob_vector_number_of_bits,
best_sample, worst_sample, state->population_size);
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
// Frees the memory requested for the state.
void bga_free(struct bga_state *state) {
#ifdef INFO
fprintf(stdout, "[INFO] Freeing memory\n");
#endif
for (int vector_number = 0; vector_number < state->number_of_prob_vectors; vector_number++) {
fprintf(stderr, "[INFO] Freeing gpu_prob_vectors[%d]\n", vector_number);
hipFree(state->gpu_prob_vectors[vector_number]);
}
free(state->gpu_prob_vectors);
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
for (int vector_number = 0; vector_number < state->number_of_prob_vectors; vector_number++) {
fprintf(stderr, "[INFO] Freeing gpu_samples[%d][%d]\n", sample_number, vector_number);
hipFree(state->gpu_samples[sample_number][vector_number]);
}
free(state->gpu_samples[sample_number]);
}
free(state->gpu_samples);
free(state->samples_fitness);
for (int vector_number = 0; vector_number < state->number_of_prob_vectors; vector_number++) {
fprintf(stderr, "[INFO] Freeing vector_sum_float_free[%d]\n", vector_number);
vector_sum_int_free(
state->gpu_int32_vector_sum[vector_number],
state->cpu_int32_vector_sum[vector_number]);
fprintf(stderr, "[INFO] Freeing vector_sum_bit_free[%d]\n", vector_number);
vector_sum_bit_free(
state->gpu_bit_vector_sum[vector_number],
state->cpu_bit_vector_sum[vector_number]);
}
free(state->gpu_int32_vector_sum);
free(state->cpu_int32_vector_sum);
free(state->gpu_bit_vector_sum);
free(state->cpu_bit_vector_sum);
}
|
billionga.cu
|
#include <assert.h>
#include <cuda.h>
#include <math.h>
#include "config.h"
#include "util.h"
#include "cuda-util.h"
#include "mtgp-1.1/mtgp32-cuda.h"
#include "billionga.h"
//#define SHOW_PROB_VECTOR_BITS 16
#define SHOW_PROB_VECTOR_BITS 128
#define SHOW_SAMPLE_BITS 128
#define SAMPLE_PROB_VECTOR_BLOCKS 64
#define SAMPLE_PROB_VECTOR_THREADS 256
#define SAMPLE_PROB_VECTOR_SHMEM (SAMPLE_PROB_VECTOR_THREADS >> 5)
#define UPDATE_PROB_VECTOR_BLOCKS 128
#define UPDATE_PROB_VECTOR_THREADS 512
#define UPDATE_PROB_VECTOR_SHMEM (UPDATE_PROB_VECTOR_THREADS >> 5)
// Step 1 of the algorithm.
void bga_initialization(struct bga_state *state, long number_of_bits, int number_of_prob_vectors, int number_of_samples) {
state->number_of_bits = number_of_bits;
state->number_of_samples = number_of_samples;
state->number_of_prob_vectors = number_of_prob_vectors;
state->population_size = POPULATION_SIZE;
state->max_prob_sum = (number_of_bits * POPULATION_SIZE);
//#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Initializing Billion GA ====================\n");
fprintf(stdout, "[INFO] Problem size : %ld\n", number_of_bits);
fprintf(stdout, "[INFO] Population size : %d\n", state->population_size);
fprintf(stdout, "[INFO] Num. of vectors : %d\n", state->number_of_prob_vectors);
fprintf(stdout, "[INFO] Delta : %d\n", DELTA);
fprintf(stdout, "[INFO] Prob. min. value: %d\n", MIN_PVALUE);
fprintf(stdout, "[INFO] Prob. max. value: %d\n", MAX_PVALUE);
//#endif
// === Request memory ==============================================================
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Solicitando memoria =======================\n");
#endif
// === Request memory for the probability vector =========================================
state->prob_vector_bit_count = state->number_of_bits / number_of_prob_vectors;
int bits_left = state->number_of_bits % number_of_prob_vectors;
if (bits_left == 0) {
state->last_prob_vector_bit_count = state->prob_vector_bit_count;
} else {
state->last_prob_vector_bit_count = bits_left;
}
//#ifdef INFO
fprintf(stdout, "[INFO] Requesting a size %d prob_vector_size CPU memory\n", state->number_of_prob_vectors);
//#endif
size_t prob_vectors_acc_prob_array_size = sizeof(long) * state->number_of_prob_vectors;
state->prob_vectors_acc_prob = (long*)malloc(prob_vectors_acc_prob_array_size);
if (!state->prob_vectors_acc_prob) {
fprintf(stderr, "[ERROR] Requesting CPU memory for the prob_vectors_acc_prob\n");
exit(EXIT_FAILURE);
}
size_t prob_vector_array_size = sizeof(int*) * state->number_of_prob_vectors;
state->gpu_prob_vectors = (int**)malloc(prob_vector_array_size);
if (!state->gpu_prob_vectors) {
fprintf(stderr, "[ERROR] Requesting CPU memory for the prob_vector\n");
exit(EXIT_FAILURE);
}
// === Allocate memory for the samples ==================================================
//#ifdef INFO
fprintf(stdout, "[INFO] Requesting a size %d samples CPU memory\n", state->number_of_samples);
//#endif
size_t samples_vector_fitness_array_size = sizeof(int*) * state->number_of_samples;
state->samples_vector_fitness = (int**)malloc(samples_vector_fitness_array_size);
if (!state->samples_vector_fitness) {
fprintf(stderr, "[ERROR] Requesting samples_vector_fitness CPU memory\n");
exit(EXIT_FAILURE);
}
size_t samples_array_size = sizeof(int*) * state->number_of_samples;
state->gpu_samples = (int***)malloc(samples_array_size);
if (!state->gpu_samples) {
fprintf(stderr, "[ERROR] Requesting samples_fitness CPU memory\n");
exit(EXIT_FAILURE);
}
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
//#ifdef INFO
fprintf(stdout, "[INFO] > Requesting CPU memory for sample %d vectors array\n", sample_number);
//#endif
size_t samples_vector_array_size = sizeof(int*) * state->number_of_prob_vectors;
state->gpu_samples[sample_number] = (int**)malloc(samples_vector_array_size);
if (!state->gpu_samples) {
fprintf(stderr, "[ERROR] > Requesting CPU memory for sample_vector_array[%d]\n", sample_number);
exit(EXIT_FAILURE);
}
size_t samples_vector_fitness_size = sizeof(int) * state->number_of_prob_vectors;
state->samples_vector_fitness[sample_number] = (int*)malloc(samples_vector_fitness_size);
if (!state->samples_vector_fitness[sample_number]) {
fprintf(stderr, "[ERROR] Requesting samples_fitness CPU memory\n");
exit(EXIT_FAILURE);
}
}
size_t samples_fitness_size = sizeof(int) * state->number_of_samples;
//#ifdef INFO
fprintf(stdout, "[INFO] Requesting samples_fitness CPU memory (size: %i)\n", state->number_of_samples);
//#endif
state->samples_fitness = (int*)malloc(samples_fitness_size);
if (!state->samples_fitness) {
fprintf(stderr, "[ERROR] > Requesting CPU memory for samples_fitness_size\n");
exit(EXIT_FAILURE);
}
// === Auxiliary memory ==================================================================
size_t gpu_int32_vector_sum_size = sizeof(long*) * state->number_of_prob_vectors;
state->gpu_int32_vector_sum = (long**)malloc(gpu_int32_vector_sum_size);
size_t cpu_int32_vector_sum_size = sizeof(long*) * state->number_of_prob_vectors;
state->cpu_int32_vector_sum = (long**)malloc(cpu_int32_vector_sum_size);
size_t gpu_bit_vector_sum_size = sizeof(int*) * state->number_of_prob_vectors;
state->gpu_bit_vector_sum = (int**)malloc(gpu_bit_vector_sum_size);
size_t cpu_bit_vector_sum_size = sizeof(int*) * state->number_of_prob_vectors;
state->cpu_bit_vector_sum = (int**)malloc(cpu_bit_vector_sum_size);
}
void bga_initialize_thread(struct bga_state *state, int prob_vector_number) {
cudaError_t error;
#if defined(TIMMING)
float gputime;
cudaEvent_t start;
cudaEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
//#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Solicitando memoria para el thread %d =====\n", prob_vector_number);
//#endif
// === Allocate memory for the probability vector ==================================
{
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
size_t prob_vector_size = sizeof(int) * current_prob_vector_number_of_bits;
//#ifdef INFO
fprintf(stdout, "[INFO] > Requesting %d bits GPU memory for prob_vector %d (size: %i / %lu Mb)\n",
current_prob_vector_number_of_bits, prob_vector_number, current_prob_vector_number_of_bits,
prob_vector_size >> 20);
//#endif
error = cudaMalloc((void**)&(state->gpu_prob_vectors[prob_vector_number]), prob_vector_size);
if (error != cudaSuccess) {
fprintf(stderr, "[ERROR] Requesting GPU memory for prob_vector_number[%d]\n", prob_vector_number);
exit(EXIT_FAILURE);
}
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "TIME] Processing time: %f (ms)\n", gputime);
ccudaEventRecord(start, 0);
#endif
// === Allocate memory for the samples ==================================================
//#ifdef INFO
fprintf(stdout, "[INFO] Requesting a size %d samples CPU memory\n", state->number_of_samples);
//#endif
{
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
size_t sample_vector_size = sizeof(int) * (current_prob_vector_number_of_bits >> 5);
int right_size = current_prob_vector_number_of_bits & ((1<<5)-1);
assert(right_size == 0);
//#ifdef INFO
fprintf(stdout, "[INFO] > Requesting sample %d GPU memory for vector %d (size: %i / %lu Mb)\n",
sample_number, prob_vector_number, current_prob_vector_number_of_bits >> 5, sample_vector_size >> 20);
//#endif
error = cudaMalloc((void**)&(state->gpu_samples[sample_number][prob_vector_number]), sample_vector_size);
if (error != cudaSuccess) {
fprintf(stderr, "[ERROR] > Requesting GPU memory for sample_number[%d]\n", sample_number);
exit(EXIT_FAILURE);
}
}
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "TIME] Processing time: %f (ms)\n", gputime);
ccudaEventRecord(start, 0);
#endif
// === Initialize the probability vector ============================================
#if defined(INFO) || defined(DEBUG)
fprintf(stdout, "[INFO] === Inicializando memoria =======================\n");
#endif
#if defined(TIMMING)
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
{
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
#ifdef INFO
fprintf(stdout, "[INFO] Inicializando GPU memory of prob_vector %d (%d bits)\n",
prob_vector_number, current_prob_vector_number_of_bits);
#endif
vector_set_int(state->gpu_prob_vectors[prob_vector_number],
current_prob_vector_number_of_bits, INIT_PROB_VECTOR_VALUE);
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
// === Auxiliary memory ==================================================================
vector_sum_int_alloc(&(state->gpu_int32_vector_sum[prob_vector_number]),
&(state->cpu_int32_vector_sum[prob_vector_number]));
vector_sum_bit_alloc(&(state->gpu_bit_vector_sum[prob_vector_number]),
&(state->cpu_bit_vector_sum[prob_vector_number]));
}
long bga_get_part_accumulated_prob(struct bga_state *state, int prob_vector_number) {
vector_sum_int_init(state->gpu_int32_vector_sum[prob_vector_number]);
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
vector_sum_int(state->gpu_prob_vectors[prob_vector_number],
state->gpu_int32_vector_sum[prob_vector_number],
current_prob_vector_number_of_bits);
state->prob_vectors_acc_prob[prob_vector_number] = vector_sum_int_get(
state->gpu_int32_vector_sum[prob_vector_number],
state->cpu_int32_vector_sum[prob_vector_number]);
return state->prob_vectors_acc_prob[prob_vector_number];
}
long bga_get_part_stats_prob(struct bga_state *state, int prob_vector_number, int op, int value) {
vector_sp_int_init(state->gpu_int32_vector_sum[prob_vector_number]);
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
vector_sp_int(state->gpu_prob_vectors[prob_vector_number],
state->gpu_int32_vector_sum[prob_vector_number],
current_prob_vector_number_of_bits, op, value);
long result;
result = vector_sp_int_get(
state->gpu_int32_vector_sum[prob_vector_number],
state->cpu_int32_vector_sum[prob_vector_number]);
return result;
}
long bga_get_full_accumulated_prob(struct bga_state *state) {
long result = 0;
for (int prob_vector_number = 0; prob_vector_number < state->number_of_prob_vectors; prob_vector_number++) {
result += state->prob_vectors_acc_prob[prob_vector_number];
}
return result;
}
void bga_show_prob_vector_state(struct bga_state *state) {
#if defined(TIMMING)
float gputime;
cudaEvent_t start;
cudaEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
fprintf(stdout, "[INFO] === Probability vector status =======================\n");
vector_sum_int_init(state->gpu_int32_vector_sum[0]);
vector_sum_int_show(state->gpu_int32_vector_sum[0], state->cpu_int32_vector_sum[0]);
fprintf(stdout, "[INFO] Prob. vector sample:");
for (int prob_vector_number = 0; prob_vector_number < state->number_of_prob_vectors; prob_vector_number++) {
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
if (prob_vector_number == 0) {
int probs_to_show_count = SHOW_PROB_VECTOR_BITS;
if (current_prob_vector_number_of_bits < SHOW_PROB_VECTOR_BITS)
probs_to_show_count = state->prob_vector_bit_count;
int *probs_to_show = (int*)malloc(sizeof(int) * probs_to_show_count);
ccudaMemcpy(probs_to_show, state->gpu_prob_vectors[prob_vector_number],
sizeof(uint32_t) * probs_to_show_count, cudaMemcpyDeviceToHost);
long sum = 0;
for (int i = 0; i < probs_to_show_count; i++) {
fprintf(stdout, " %d (%.4f)", probs_to_show[i], (float)probs_to_show[i] / (float)state->population_size);
sum += probs_to_show[i];
}
fprintf(stdout, "... Total [%d]: %ld ( %f )\n", probs_to_show_count, sum, (float)sum / (float)(probs_to_show_count * state->population_size));
free(probs_to_show);
}
vector_sum_int(state->gpu_prob_vectors[prob_vector_number],
state->gpu_int32_vector_sum[0], current_prob_vector_number_of_bits);
}
long accumulated_probability = 0;
accumulated_probability = vector_sum_int_get(
state->gpu_int32_vector_sum[0],
state->cpu_int32_vector_sum[0]);
fprintf(stdout, "[INFO] Prob. vector accumulated probability (%ld / %ld): %f\n",
accumulated_probability, state->max_prob_sum,
(float)accumulated_probability / (float)state->max_prob_sum);
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
void bga_compute_sample_part_fitness(struct bga_state *state, int prob_vector_number) {
#if defined(TIMMING)
float gputime;
cudaEvent_t start;
cudaEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
#if defined(INFO)
fprintf(stdout, "[INFO] === Sample vectors fitness =============================\n");
#endif
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
vector_sum_bit_init(state->gpu_bit_vector_sum[prob_vector_number]);
#if defined(DEBUG)
fprintf(stdout, "[INFO] Computing sample vector %d fitness: ", sample_number);
#endif
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
vector_sum_bit(state->gpu_samples[sample_number][prob_vector_number],
state->gpu_bit_vector_sum[prob_vector_number], current_prob_vector_number_of_bits);
state->samples_vector_fitness[sample_number][prob_vector_number] = vector_sum_bit_get(
state->gpu_bit_vector_sum[prob_vector_number],
state->cpu_bit_vector_sum[prob_vector_number]);
#if defined(DEBUG)
fprintf(stdout, "%d\n", state->samples_vector_fitness[sample_number][prob_vector_number]);
#endif
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
void bga_compute_sample_full_fitness(struct bga_state *state) {
int result;
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
result = 0;
for (int prob_vector_number = 0; prob_vector_number < state->number_of_prob_vectors; prob_vector_number++) {
result += state->samples_vector_fitness[sample_number][prob_vector_number];
}
state->samples_fitness[sample_number] = result;
}
}
void bga_show_samples(struct bga_state *state) {
#if defined(TIMMING)
float gputime;
cudaEvent_t start;
cudaEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
fprintf(stdout, "[INFO] === Sample vectors =====================================\n");
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
fprintf(stdout, "[INFO] Sample vector sample (%d):", sample_number);
for (int prob_vector_number = 0; prob_vector_number < state->number_of_prob_vectors; prob_vector_number++) {
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
if (prob_vector_number == 0) {
int bits_to_show_count = SHOW_SAMPLE_BITS;
if (current_prob_vector_number_of_bits < SHOW_SAMPLE_BITS)
bits_to_show_count = state->prob_vector_bit_count;
int bytes_to_show_count = bits_to_show_count >> 5;
int *bytes_to_show = (int*)malloc(sizeof(int) * bytes_to_show_count);
ccudaMemcpy(bytes_to_show, state->gpu_samples[sample_number][prob_vector_number],
sizeof(uint32_t) * bytes_to_show_count, cudaMemcpyDeviceToHost);
for (int i = 0; i < bytes_to_show_count; i++) {
fprintf(stdout, " %s", int_to_binary(bytes_to_show[i]));
}
free(bytes_to_show);
fprintf(stdout, "...\n");
}
fprintf(stdout, "[INFO] Prob. vector %d, sample %d >> fitness: %d\n", prob_vector_number, sample_number, state->samples_vector_fitness[sample_number][prob_vector_number]);
}
}
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
__global__ void kern_sample_prob_vector(int *gpu_prob_vector, int prob_vector_size,
int prob_vector_starting_pos, float *prng_vector, int prng_vector_size, int *gpu_sample,
int population_size) {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int samples_per_loop = gridDim.x * blockDim.x;
int max_samples_doable = prob_vector_size - prob_vector_starting_pos;
if (max_samples_doable > prng_vector_size) max_samples_doable = prng_vector_size;
int loops_count = max_samples_doable / samples_per_loop;
if (max_samples_doable % samples_per_loop > 0) loops_count++;
__shared__ int current_block_sample[SAMPLE_PROB_VECTOR_THREADS];
int prob_vector_position;
int prng_position;
int block_starting_pos;
for (int loop = 0; loop < loops_count; loop++) {
// 0 by default.
current_block_sample[tid] = 0;
// Each loop generates blockDim.x bits and stores them in the __shared__ memory array.
block_starting_pos = (samples_per_loop * loop) + (bid * blockDim.x);
prng_position = block_starting_pos + tid;
prob_vector_position = prob_vector_starting_pos + prng_position;
if (prng_position < max_samples_doable) {
if ((gpu_prob_vector[prob_vector_position] + population_size) >= (prng_vector[prng_position] * population_size)) {
// 1
current_block_sample[tid] = 1 << (tid & ((1 << 5)-1));
}
}
__syncthreads();
if ((tid << 5) < SAMPLE_PROB_VECTOR_THREADS) {
int aux = current_block_sample[tid << 5];
#pragma unroll
for (int i = 1; i < 32; i++) {
aux = aux | current_block_sample[(tid << 5)+i];
}
int sample_pos = prob_vector_starting_pos + block_starting_pos;
if ((sample_pos + (tid << 5)) < prob_vector_size) {
gpu_sample[(sample_pos >> 5) + tid] = aux;
}
}
__syncthreads();
}
}
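// Summary of kern_sample_prob_vector: each thread compares one probability entry against a
// pre-generated PRNG value and contributes a single bit; the bits of every 32 consecutive
// threads are then OR-combined in shared memory into one packed int of gpu_sample.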
// Step 2 of the algorithm.
void bga_model_sampling_mt(struct bga_state *state, mtgp32_status *mt_status, int prob_vector_number) {
#if defined(DEBUG)
fprintf(stdout, "[INFO] === Sampling the model =======================\n");
#endif
#if defined(TIMMING)
float gputime;
cudaEvent_t start;
cudaEvent_t end;
cudaEvent_t start_inner;
cudaEvent_t end_inner;
#endif
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
#if defined(DEBUG)
fprintf(stdout, "[INFO] > Sample %d ", sample_number);
#endif
#if defined(TIMMING)
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
ccudaEventCreate(&start_inner);
ccudaEventCreate(&end_inner);
#endif
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
int total_loops;
total_loops = current_prob_vector_number_of_bits / mt_status->numbers_per_gen;
if (current_prob_vector_number_of_bits % mt_status->numbers_per_gen > 0) total_loops++;
int prob_vector_starting_pos;
for (int loop = 0; loop < total_loops; loop++) {
prob_vector_starting_pos = mt_status->numbers_per_gen * loop;
// Generate random numbers.
#if defined(TIMMING)
fprintf(stdout, "[TIME] Generate mtgp32_generate_float\n", gputime);
ccudaEventRecord(start_inner, 0);
#endif
mtgp32_generate_float(mt_status);
#if defined(TIMMING)
ccudaEventRecord(end_inner, 0);
ccudaEventSynchronize(end_inner);
ccudaEventElapsedTime(&gputime, start_inner, end_inner);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
#endif
#if defined(DEBUG)
fprintf(stdout, ".");
#endif
#if defined(TIMMING)
fprintf(stdout, "[TIME] Generate kern_sample_prob_vector\n", gputime);
ccudaEventRecord(start_inner, 0);
#endif
// Sample the prob. vector with the generated random numbers.
kern_sample_prob_vector<<< SAMPLE_PROB_VECTOR_BLOCKS, SAMPLE_PROB_VECTOR_THREADS>>>(
state->gpu_prob_vectors[prob_vector_number], current_prob_vector_number_of_bits,
prob_vector_starting_pos, (float*)mt_status->d_data, mt_status->numbers_per_gen,
state->gpu_samples[sample_number][prob_vector_number], state->population_size);
#if defined(TIMMING)
ccudaEventRecord(end_inner, 0);
ccudaEventSynchronize(end_inner);
ccudaEventElapsedTime(&gputime, start_inner, end_inner);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
#endif
}
#if defined(DEBUG)
fprintf(stdout, "(%d)\n", total_loops);
#endif
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Total processing time: %f (ms)\n", gputime);
#endif
}
//bga_show_samples(state);
#if defined(TIMMING)
ccudaEventDestroy(start_inner);
ccudaEventDestroy(end_inner);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
void cpu_model_update(int *gpu_prob_vector, int prob_vector_size,
int *gpu_best_sample, int *gpu_worst_sample) {
int *prob_vector = (int*)malloc(sizeof(int) * prob_vector_size);
ccudaMemcpy(prob_vector, gpu_prob_vector, sizeof(uint32_t) * prob_vector_size,
cudaMemcpyDeviceToHost);
long current_acc_prob = 0;
for (int i = 0; i < prob_vector_size; i++) {
current_acc_prob += prob_vector[i];
}
int sample_size = prob_vector_size >> 5;
int *best_sample = (int*)malloc(sizeof(int) * sample_size);
int *worst_sample = (int*)malloc(sizeof(int) * sample_size);
ccudaMemcpy(best_sample, gpu_best_sample,
sizeof(uint32_t) * sample_size, cudaMemcpyDeviceToHost);
ccudaMemcpy(worst_sample, gpu_worst_sample,
sizeof(uint32_t) * sample_size, cudaMemcpyDeviceToHost);
int best_sample_current_bit_value;
int worst_sample_current_bit_value;
int delta;
for (int i = 0; i < prob_vector_size; i++) {
int bit_pos = i & ((1 << 5)-1);
int int_pos = i >> 5;
best_sample_current_bit_value = (best_sample[int_pos] & (1 << bit_pos)) >> bit_pos;
worst_sample_current_bit_value = (worst_sample[int_pos] & (1 << bit_pos)) >> bit_pos;
delta = best_sample_current_bit_value - worst_sample_current_bit_value;
prob_vector[i] += delta;
}
long new_acc_prob = 0;
for (int i = 0; i < prob_vector_size; i++) {
new_acc_prob += prob_vector[i];
}
fprintf(stdout, "[DEBUG][CPU] Acc. prob. => Current %ld , New %ld (delta: %ld )\n",
current_acc_prob, new_acc_prob, new_acc_prob - current_acc_prob);
free(prob_vector);
free(best_sample);
free(worst_sample);
}
__global__ void kern_model_update(int *gpu_prob_vector, int prob_vector_size,
int *best_sample, int *worst_sample, int max_value) {
const int tid = threadIdx.x;
const int bid = blockIdx.x;
__shared__ int best_sample_part[UPDATE_PROB_VECTOR_SHMEM];
__shared__ int worst_sample_part[UPDATE_PROB_VECTOR_SHMEM];
int loop_size = gridDim.x * blockDim.x;
int loop_count = prob_vector_size / loop_size;
if (prob_vector_size % loop_size > 0) loop_count++;
int prob_vector_position;
int block_starting_pos;
const int tid_int = tid >> 5;
const int tid_bit = tid & ((1 << 5)-1);
int best_sample_current_bit_value;
int worst_sample_current_bit_value;
int delta;
for (int loop = 0; loop < loop_count; loop++) {
block_starting_pos = (loop_size * loop) + (bid * blockDim.x);
if (tid < UPDATE_PROB_VECTOR_SHMEM) {
if ((block_starting_pos + (tid << 5)) < prob_vector_size) {
best_sample_part[tid] = best_sample[(block_starting_pos >> 5) + tid];
worst_sample_part[tid] = worst_sample[(block_starting_pos >> 5) + tid];
}
}
__syncthreads();
prob_vector_position = block_starting_pos + tid;
if (prob_vector_position < prob_vector_size) {
best_sample_current_bit_value = (best_sample_part[tid_int] & (1 << tid_bit)) >> tid_bit;
worst_sample_current_bit_value = (worst_sample_part[tid_int] & (1 << tid_bit)) >> tid_bit;
delta = (best_sample_current_bit_value - worst_sample_current_bit_value) * DELTA;
int aux = gpu_prob_vector[prob_vector_position];
aux = aux + delta;
if (aux < MIN_PVALUE) {
aux = MIN_PVALUE;
} else if (aux > MAX_PVALUE) {
aux = MAX_PVALUE;
}
gpu_prob_vector[prob_vector_position] = aux;
}
}
}
// Steps 4 and 5 of the algorithm.
void bga_model_update(struct bga_state *state, int prob_vector_number) {
#if defined(DEBUG)
fprintf(stdout, "[INFO] === Updating the model =======================\n");
#endif
#if defined(TIMMING)
float gputime;
cudaEvent_t start;
cudaEvent_t end;
ccudaEventCreate(&start);
ccudaEventCreate(&end);
ccudaEventRecord(start, 0);
#endif
assert(state->number_of_samples == 2);
int best_sample_index, worst_sample_index;
long fitness_sample_a, fitness_sample_b;
#if defined(FULL_FITNESS_UPDATE)
fitness_sample_a = state->samples_fitness[0];
fitness_sample_b = state->samples_fitness[1];
#endif
#if defined(PARTIAL_FITNESS_UPDATE)
fitness_sample_a = state->samples_vector_fitness[0][prob_vector_number];
fitness_sample_b = state->samples_vector_fitness[1][prob_vector_number];
#endif
#if defined(DEBUG)
fprintf(stdout, "[INFO] prob. vector[%d] fitness_sample_a=%ld fitness_sample_a=%ld\n",
prob_vector_number, fitness_sample_a, fitness_sample_b);
#endif
if (fitness_sample_a >= fitness_sample_b) {
best_sample_index = 0;
worst_sample_index = 1;
}
else {
best_sample_index = 1;
worst_sample_index = 0;
}
#if defined(DEBUG)
fprintf(stdout, "[INFO] prob. vector[%d] best=%d, worst=%d\n",
prob_vector_number, best_sample_index, worst_sample_index);
#endif
int *best_sample;
int *worst_sample;
int current_prob_vector_number_of_bits = state->prob_vector_bit_count;
if (prob_vector_number + 1 == state->number_of_prob_vectors) {
current_prob_vector_number_of_bits = state->last_prob_vector_bit_count;
}
best_sample = state->gpu_samples[best_sample_index][prob_vector_number];
worst_sample = state->gpu_samples[worst_sample_index][prob_vector_number];
/*cpu_model_update(state->gpu_prob_vectors[prob_vector_number],
current_prob_vector_number_of_bits,
best_sample, worst_sample);
*/
kern_model_update <<< UPDATE_PROB_VECTOR_BLOCKS, UPDATE_PROB_VECTOR_THREADS >>>(
state->gpu_prob_vectors[prob_vector_number], current_prob_vector_number_of_bits,
best_sample, worst_sample, state->population_size);
#if defined(TIMMING)
ccudaEventRecord(end, 0);
ccudaEventSynchronize(end);
ccudaEventElapsedTime(&gputime, start, end);
fprintf(stdout, "[TIME] Processing time: %f (ms)\n", gputime);
ccudaEventDestroy(start);
ccudaEventDestroy(end);
#endif
}
// Frees the memory allocated for the state.
void bga_free(struct bga_state *state) {
#ifdef INFO
fprintf(stdout, "[INFO] Freeing memory\n");
#endif
for (int vector_number = 0; vector_number < state->number_of_prob_vectors; vector_number++) {
fprintf(stderr, "[INFO] Freeing gpu_prob_vectors[%d]\n", vector_number);
cudaFree(state->gpu_prob_vectors[vector_number]);
}
free(state->gpu_prob_vectors);
for (int sample_number = 0; sample_number < state->number_of_samples; sample_number++) {
for (int vector_number = 0; vector_number < state->number_of_prob_vectors; vector_number++) {
fprintf(stderr, "[INFO] Freeing gpu_samples[%d][%d]\n", sample_number, vector_number);
cudaFree(state->gpu_samples[sample_number][vector_number]);
}
free(state->gpu_samples[sample_number]);
}
free(state->gpu_samples);
free(state->samples_fitness);
for (int vector_number = 0; vector_number < state->number_of_prob_vectors; vector_number++) {
fprintf(stderr, "[INFO] Freeing vector_sum_float_free[%d]\n", vector_number);
vector_sum_int_free(
state->gpu_int32_vector_sum[vector_number],
state->cpu_int32_vector_sum[vector_number]);
fprintf(stderr, "[INFO] Freeing vector_sum_bit_free[%d]\n", vector_number);
vector_sum_bit_free(
state->gpu_bit_vector_sum[vector_number],
state->cpu_bit_vector_sum[vector_number]);
}
free(state->gpu_int32_vector_sum);
free(state->cpu_int32_vector_sum);
free(state->gpu_bit_vector_sum);
free(state->cpu_bit_vector_sum);
}
|
56f1d271568e2c0674aa38e0a78143f9ae5e317f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ int gpu_sameVectors(int *vecA, int *vecB, int size)
{
int same = 1;
for (int i = 0; i < size; i++)
{
if (vecA[i] != vecB[i])
{
same = 0;
break;
}
}
return same;
}
__device__ int gpu_increase(const int *Ntemp, int *it, int Ntemp_size){
#pragma unroll
for (int i = 0, size = Ntemp_size; i != size; ++i) {
const int index = size - 1 - i;
++it[index];
if (it[index] > Ntemp[index]) {
it[index] = 0;
} else {
return 1;
}
}
return 0;
}
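// gpu_increase behaves like a mixed-radix odometer over `it`: it bumps the last digit and
// carries while a digit exceeds its bound in Ntemp, returning 0 once every combination with
// 0 <= it[i] <= Ntemp[i] has been produced. For Ntemp = {1,2} the do/while loop in
// gpu_generate2 below therefore visits (0,0),(0,1),(0,2),(1,0),(1,1),(1,2).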
__device__ int gpu_sumFun(int *A, int *B, int size) // size = pow(k,2) elements
{
int summ=0;
#pragma unroll
for(int i=0; i<size; i++)
{
summ= summ + A[i]*B[i];
}
return summ;
}
__device__ void gpu_generate2(int *Ntemp, int Ntemp_size, int *Ctemp, int *NMinusStemp, int *dev_roundVec, const int T, const int powK){
//Ntemp_size = pow(k,2)
//vector<int> it(Ntemp.size(), 0);
int it[Ntemp_size];
int counter = 0;
#pragma unroll
for (int i = 0; i < Ntemp_size; i++)
it[i] = 0;
do {
int s[Ntemp_size];
#pragma unroll
for (int i = 0; i < Ntemp_size; i++)
{
s[i] = it[i];
}
//for(vector<int>::const_iterator i = it.begin(); i != it.end(); ++i)
//{
// s.push_back(*i);
//}
//Cwhole.push_back(s);
int sSum=gpu_sumFun(s,dev_roundVec,Ntemp_size);
if(sSum <= T)
{
#pragma unroll
for (int i = 0; i < Ntemp_size; i++)
{
Ctemp[counter*Ntemp_size + i] = s[i];
}
//Ctemp.push_back(s);
int NS[Ntemp_size];
#pragma unroll
for(int j=0; j<powK; j++)
{
NS[j] = Ntemp[j]-s[j];
}
if(gpu_sameVectors(NS, Ntemp, Ntemp_size))
continue;
#pragma unroll
for (int i = 0; i < Ntemp_size; i++)
{
NMinusStemp[counter * Ntemp_size + i] = NS[i];
}
//NMinusStemp.push_back(NS);
}
counter++;
}while (gpu_increase(Ntemp, it, Ntemp_size));
}
//for(int j=indexomp;j< (counterVec[i] + indexomp) ;j++) // this is to determine the job for each level
__global__ void FindOPT(int *dev_ATE_elm, int *dev_counterVec, int indexomp, int i /* index of the current level in dev_counterVec */, int *dev_roundVec, const int T, const int k, const int powK, const int dev_AllTableElemets_size,
int *dev_ATE_Csubsets, int *dev_ATE_NSsubsets, int *dev_ATE_NSsubsets_size, int Cwhole_size, int *dev_zeroVec, int *dev_ATE_optVector, int *dev_ATE_optVector_size,
int *dev_ATE_myOPT, int *dev_ATE_myOptimalindex, int *dev_ATE_myMinNSVector){
int thread = blockDim.x * blockIdx.x + threadIdx.x;
int j = thread + indexomp;
if (j < dev_counterVec[i] + indexomp){
//vector<vector<int> > Ctemp;
//vector<vector<int> > NMinusStemp;
//vector<vector<int> > Cwhole;
//generate2(AllTableElemets[j].elm,Ctemp,NMinusStemp);
gpu_generate2(&dev_ATE_elm[j * powK], powK, dev_ATE_Csubsets, dev_ATE_NSsubsets, dev_roundVec, T, powK); //dev_ATE_elm_size[j] = Ntemp.size() = powK
// AllTableElemets[j].NSsubsets=NMinusStemp; //ni-si
// AllTableElemets[j].Csubsets=Ctemp; //configurations
// for(int h=0;h<AllTableElemets[j].NSsubsets.size();h++) // looking through subset of NSTableElements[j],
// NSTableElements is the table for all previous OPT. Find all subsets(dependency) of selected job
int optVecIndex = 0;
for(int h=0; h < dev_ATE_NSsubsets_size[j]; h++)
{
// if(AllTableElemets[j].NSsubsets[h]==zeroVec) // if subset is zero Vector , its OPT is 0
if(gpu_sameVectors(&dev_ATE_NSsubsets[(j * Cwhole_size + h) * powK], dev_zeroVec, powK))
{
//AllTableElemets[j].optVector.push_back(0);
dev_ATE_optVector[j * powK + optVecIndex] = 0;
optVecIndex++;
break;
}
//if(AllTableElemets[j].NSsubsets[h]==AllTableElemets[j].elm) // if NSsubsets[h] is equal to NSTableElements[j] (itself)
//( the one that we are doing operation for it ) ----> break (not interested )
// check if it is itself, if yes, ignore OPT of job itself.
if(gpu_sameVectors(&dev_ATE_NSsubsets[(j * Cwhole_size + h) * powK], &dev_ATE_elm[j * powK], powK) )
break;
//for(int r=0; r<AllTableElemets.size();r++) // to find the match in the NSTableElements for reaching OPT
//dependencies may not be consecutively stored in the table, so have to go through the whole
//table (AllTableElemets) and find them (matched to AllTableElemets[j].NSsubsets[h]).
for (int r = 0; r < dev_AllTableElemets_size; r++)
{
//if(AllTableElemets[j].NSsubsets[h]==AllTableElemets[r].elm) // if found match of NSsubsets[h], copy its OPT and break
if (gpu_sameVectors(&dev_ATE_NSsubsets[(j * Cwhole_size + h) * powK], &dev_ATE_elm[r * powK], powK))
{
//AllTableElemets[j].optVector.push_back(AllTableElemets[r].myOPT);
dev_ATE_optVector[j * powK + optVecIndex] = dev_ATE_myOPT[r];
optVecIndex++;
break;
}
}
}
int minn = 100000;
int myOptimalindex = 0;
//for(int pp=0; pp<AllTableElemets[j].optVector.size();pp++) // find out the OPT from all dependencies.
#pragma unroll
for (int pp = 0; pp < dev_ATE_optVector_size[j]; pp++)
{
// cout << AllTableElemets[j].optVector[pp]<<" ";
// if(AllTableElemets[j].optVector[pp] < minn)
if (dev_ATE_optVector[j * powK + pp] < minn)
{
// minn=AllTableElemets[j].optVector[pp];
minn = dev_ATE_optVector[j * powK + pp];
myOptimalindex=pp;
}
}
// cout << endl;
int optTemp=minn+1;
// AllTableElemets[j].myOPT=optTemp;
dev_ATE_myOPT[j] = optTemp;
// AllTableElemets[j].myOptimalindex=myOptimalindex;
dev_ATE_myOptimalindex[j] = myOptimalindex;
// if(AllTableElemets[j].NSsubsets.size()>0)
if (dev_ATE_NSsubsets_size[j] > 0)
{
// AllTableElemets[j].myMinNSVector=AllTableElemets[j].NSsubsets[myOptimalindex];
// dev_ATE_NSsubsets_size[j], does this vector store all same value for sizes?
hipMemcpy(&dev_ATE_myMinNSVector[j * vectorSize], &dev_ATE_NSsubsets[(j * Cwhole_size + myOptimalindex) * powK], powK, hipMemcpyDeviceToDevice);
//dev_ATE_myMinNSVector[j] = dev_ATE_NSsubsets[(j * CWhole.SIZE + myOptimalindex) * pow(k,2)];
}
}//end if (j)
}//end FindOPT()
|
56f1d271568e2c0674aa38e0a78143f9ae5e317f.cu
|
__device__ int gpu_sameVectors(int *vecA, int *vecB, int size)
{
int same = 1;
for (int i = 0; i < size; i++)
{
if (vecA[i] != vecB[i])
{
same = 0;
break;
}
}
return same;
}
__device__ int gpu_increase(const int *Ntemp, int *it, int Ntemp_size){
#pragma unroll
for (int i = 0, size = Ntemp_size; i != size; ++i) {
const int index = size - 1 - i;
++it[index];
if (it[index] > Ntemp[index]) {
it[index] = 0;
} else {
return 1;
}
}
return 0;
}
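// gpu_increase behaves like a mixed-radix odometer over `it`: it bumps the last digit and
// carries while a digit exceeds its bound in Ntemp, returning 0 once every combination with
// 0 <= it[i] <= Ntemp[i] has been produced. For Ntemp = {1,2} the do/while loop in
// gpu_generate2 below therefore visits (0,0),(0,1),(0,2),(1,0),(1,1),(1,2).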
__device__ int gpu_sumFun(int *A, int *B, int size) // size = pow(k,2) elements
{
int summ=0;
#pragma unroll
for(int i=0; i<size; i++)
{
summ= summ + A[i]*B[i];
}
return summ;
}
__device__ void gpu_generate2(int *Ntemp, int Ntemp_size, int *Ctemp, int *NMinusStemp, int *dev_roundVec, const int T, const int powK){
//Ntemp_size = pow(k,2)
//vector<int> it(Ntemp.size(), 0);
int it[Ntemp_size];
int counter = 0;
#pragma unroll
for (int i = 0; i < Ntemp_size; i++)
it[i] = 0;
do {
int s[Ntemp_size];
#pragma unroll
for (int i = 0; i < Ntemp_size; i++)
{
s[i] = it[i];
}
//for(vector<int>::const_iterator i = it.begin(); i != it.end(); ++i)
//{
// s.push_back(*i);
//}
//Cwhole.push_back(s);
int sSum=gpu_sumFun(s,dev_roundVec,Ntemp_size);
if(sSum <= T)
{
#pragma unroll
for (int i = 0; i < Ntemp_size; i++)
{
Ctemp[counter*Ntemp_size + i] = s[i];
}
//Ctemp.push_back(s);
int NS[Ntemp_size];
#pragma unroll
for(int j=0; j<powK; j++)
{
NS[j] = Ntemp[j]-s[j];
}
if(gpu_sameVectors(NS, Ntemp, Ntemp_size))
continue;
#pragma unroll
for (int i = 0; i < Ntemp_size; i++)
{
NMinusStemp[counter * Ntemp_size + i] = NS[i];
}
//NMinusStemp.push_back(NS);
}
counter++;
}while (gpu_increase(Ntemp, it, Ntemp_size));
}
//for(int j=indexomp;j< (counterVec[i] + indexomp) ;j++) // this is to determine the job for each level
__global__ void FindOPT(int *dev_ATE_elm, int *dev_counterVec, int indexomp, int i /* index of the current level in dev_counterVec */, int *dev_roundVec, const int T, const int k, const int powK, const int dev_AllTableElemets_size,
int *dev_ATE_Csubsets, int *dev_ATE_NSsubsets, int *dev_ATE_NSsubsets_size, int Cwhole_size, int *dev_zeroVec, int *dev_ATE_optVector, int *dev_ATE_optVector_size,
int *dev_ATE_myOPT, int *dev_ATE_myOptimalindex, int *dev_ATE_myMinNSVector){
int thread = blockDim.x * blockIdx.x + threadIdx.x;
int j = thread + indexomp;
if (j < dev_counterVec[i] + indexomp){
//vector<vector<int> > Ctemp;
//vector<vector<int> > NMinusStemp;
//vector<vector<int> > Cwhole;
//generate2(AllTableElemets[j].elm,Ctemp,NMinusStemp);
gpu_generate2(&dev_ATE_elm[j * powK], powK, dev_ATE_Csubsets, dev_ATE_NSsubsets, dev_roundVec, T, powK); //dev_ATE_elm_size[j] = Ntemp.size() = powK
// AllTableElemets[j].NSsubsets=NMinusStemp; //ni-si
// AllTableElemets[j].Csubsets=Ctemp; //configurations
// for(int h=0;h<AllTableElemets[j].NSsubsets.size();h++) // looking through subset of NSTableElements[j],
// NSTableElements is the table for all previous OPT. Find all subsets(dependency) of selected job
int optVecIndex = 0;
for(int h=0; h < dev_ATE_NSsubsets_size[j]; h++)
{
// if(AllTableElemets[j].NSsubsets[h]==zeroVec) // if subset is zero Vector , its OPT is 0
if(gpu_sameVectors(&dev_ATE_NSsubsets[(j * Cwhole_size + h) * powK], dev_zeroVec, powK))
{
//AllTableElemets[j].optVector.push_back(0);
dev_ATE_optVector[j * powK + optVecIndex] = 0;
optVecIndex++;
break;
}
//if(AllTableElemets[j].NSsubsets[h]==AllTableElemets[j].elm) // if NSsubsets[h] is equal to NSTableElements[j] (itself)
//( the one that we are doing operation for it ) ----> break (not interested )
// check if it is itself, if yes, ignore OPT of job itself.
if(gpu_sameVectors(&dev_ATE_NSsubsets[(j * Cwhole_size + h) * powK], &dev_ATE_elm[j * powK], powK) )
break;
//for(int r=0; r<AllTableElemets.size();r++) // to find the match in the NSTableElements for reaching OPT
//dependencies may not be consecutively stored in the table, so have to go through the whole
//table (AllTableElemets) and find them (matched to AllTableElemets[j].NSsubsets[h]).
for (int r = 0; r < dev_AllTableElemets_size; r++)
{
//if(AllTableElemets[j].NSsubsets[h]==AllTableElemets[r].elm) // if found match of NSsubsets[h], copy its OPT and break
if (gpu_sameVectors(&dev_ATE_NSsubsets[(j * Cwhole_size + h) * powK], &dev_ATE_elm[r * powK], powK))
{
//AllTableElemets[j].optVector.push_back(AllTableElemets[r].myOPT);
dev_ATE_optVector[j * powK + optVecIndex] = dev_ATE_myOPT[r];
optVecIndex++;
break;
}
}
}
int minn = 100000;
int myOptimalindex = 0;
//for(int pp=0; pp<AllTableElemets[j].optVector.size();pp++) // find out the OPT from all dependencies.
#pragma unroll
for (int pp = 0; pp < dev_ATE_optVector_size[j]; pp++)
{
// cout << AllTableElemets[j].optVector[pp]<<" ";
// if(AllTableElemets[j].optVector[pp] < minn)
if (dev_ATE_optVector[j * powK + pp] < minn)
{
// minn=AllTableElemets[j].optVector[pp];
minn = dev_ATE_optVector[j * powK + pp];
myOptimalindex=pp;
}
}
// cout << endl;
int optTemp=minn+1;
// AllTableElemets[j].myOPT=optTemp;
dev_ATE_myOPT[j] = optTemp;
// AllTableElemets[j].myOptimalindex=myOptimalindex;
dev_ATE_myOptimalindex[j] = myOptimalindex;
// if(AllTableElemets[j].NSsubsets.size()>0)
if (dev_ATE_NSsubsets_size[j] > 0)
{
// AllTableElemets[j].myMinNSVector=AllTableElemets[j].NSsubsets[myOptimalindex];
// dev_ATE_NSsubsets_size[j], does this vector store all same value for sizes?
cudaMemcpy(&dev_ATE_myMinNSVector[j * vectorSize], &dev_ATE_NSsubsets[(j * Cwhole_size + myOptimalindex) * powK], powK, cudaMemcpyDeviceToDevice);
//dev_ATE_myMinNSVector[j] = dev_ATE_NSsubsets[(j * CWhole.SIZE + myOptimalindex) * pow(k,2)];
}
}//end if (j)
}//end FindOPT()
|
3c3aea04a80a450f0066df9c3ea07c2b4f29a0be.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @brief Breadth-first Search Bottom-Up test program
* @file
*/
#include "Static/BUBreadthFirstSearch/BottomUpBFS.cuh"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <hip/hip_runtime_api.h> //--profile-from-start off
int exec(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
using vid_t = int;
using dst_t = int;
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
// graph::GraphStd<vid_t, eoff_t> graph;
graph::GraphStd<vid_t, eoff_t> graph(DIRECTED | ENABLE_INGOING);
CommandLineParam cmd(graph, argc, argv,false);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetInit hornet_init_inverse(graph.nV(), graph.nE(),
graph.csr_in_offsets(),
graph.csr_in_edges());
HornetGraph hornet_graph_inv(hornet_init_inverse);
HornetGraph hornet_graph(hornet_init);
BfsBottomUp2 bfs_bottom_up(hornet_graph, hornet_graph_inv);
BfsBottomUp2 bfs_top_down(hornet_graph, hornet_graph_inv);
vid_t root = graph.max_out_degree_id();
if (argc==3)
root = atoi(argv[2]);
bfs_bottom_up.set_parameters(root);
bfs_top_down.set_parameters(root);
Timer<DEVICE> TM;
hipProfilerStart();
TM.start();
//bfs_top_down.run();
bfs_bottom_up.run(hornet_graph_inv);
TM.stop();
hipProfilerStop();
TM.print("BottomUp2");
auto is_correct = bfs_bottom_up.validate();
std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
return !is_correct;
}
int main(int argc, char* argv[]) {
int ret = 0;
hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
ret = exec(argc, argv);
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
return ret;
}
|
3c3aea04a80a450f0066df9c3ea07c2b4f29a0be.cu
|
/**
* @brief Breadth-first Search Bottom-Up test program
* @file
*/
#include "Static/BUBreadthFirstSearch/BottomUpBFS.cuh"
#include <StandardAPI.hpp>
#include <Graph/GraphStd.hpp>
#include <Util/CommandLineParam.hpp>
#include <cuda_profiler_api.h> //--profile-from-start off
int exec(int argc, char* argv[]) {
using namespace timer;
using namespace hornets_nest;
using vid_t = int;
using dst_t = int;
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
// graph::GraphStd<vid_t, eoff_t> graph;
graph::GraphStd<vid_t, eoff_t> graph(DIRECTED | ENABLE_INGOING);
CommandLineParam cmd(graph, argc, argv,false);
HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(),
graph.csr_out_edges());
HornetInit hornet_init_inverse(graph.nV(), graph.nE(),
graph.csr_in_offsets(),
graph.csr_in_edges());
HornetGraph hornet_graph_inv(hornet_init_inverse);
HornetGraph hornet_graph(hornet_init);
BfsBottomUp2 bfs_bottom_up(hornet_graph, hornet_graph_inv);
BfsBottomUp2 bfs_top_down(hornet_graph, hornet_graph_inv);
vid_t root = graph.max_out_degree_id();
if (argc==3)
root = atoi(argv[2]);
bfs_bottom_up.set_parameters(root);
bfs_top_down.set_parameters(root);
Timer<DEVICE> TM;
cudaProfilerStart();
TM.start();
//bfs_top_down.run();
bfs_bottom_up.run(hornet_graph_inv);
TM.stop();
cudaProfilerStop();
TM.print("BottomUp2");
auto is_correct = bfs_bottom_up.validate();
std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n");
return !is_correct;
}
int main(int argc, char* argv[]) {
int ret = 0;
hornets_nest::gpu::initializeRMMPoolAllocation();//update initPoolSize if you know your memory requirement and memory availability in your system, if initial pool size is set to 0 (default value), RMM currently assigns half the device memory.
{//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
ret = exec(argc, argv);
}//scoping technique to make sure that hornets_nest::gpu::finalizeRMMPoolAllocation is called after freeing all RMM allocations.
hornets_nest::gpu::finalizeRMMPoolAllocation();
return ret;
}
|
5e7cb7ccca0365084c6a440a5a8e1b1ab7ebd508.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Image.cuh"
const unsigned char* Image::data() const
{
return _data;
}
size_t Image::width() const
{
return _width;
}
size_t Image::height() const
{
return _height;
}
size_t Image::depth() const
{
return _depth;
}
size_t Image::spectrum() const
{
return _spectrum;
}
size_t Image::size() const
{
return _width * _height * _depth * _spectrum;
}
long Image::offset(const int& x, const int& y, const int& z, const int& c) const
{
return x + y * (long)_width + z * (long)(_width * _height) + c * (long)(_width * _height * _depth);
}
const unsigned char& Image::at(const int& x, const int& y, const int& z, const int& c) const
{
return _data[offset(x, y, z, c)];
}
unsigned char& Image::at(const int& x, const int& y, const int& z, const int& c)
{
return _data[offset(x, y, z, c)];
}
double Image::cubic_atXY(const double& fx, const double& fy, const int& z, const int& c) const
{
const double nfx = fx < 0 ? 0 : (fx > _width - 1 ? _width - 1 : fx);
const double nfy = fy < 0 ? 0 : (fy > _height - 1 ? _height - 1 : fy);
const int x = (int)nfx, y = (int)nfy;
const double dx = nfx - x, dy = nfy - y;
const int px = x - 1 < 0 ? 0 : x - 1, nx = dx > 0 ? x + 1 : x, ax = x + 2 >= _width ? _width - 1 : x + 2;
const int py = y - 1 < 0 ? 0 : y - 1, ny = dy > 0 ? y + 1 : y, ay = y + 2 >= _height ? _height - 1 : y + 2;
const double
Ipp = (double)at(px, py, z, c), Icp = (double)at(x, py, z, c), Inp = (double)at(nx, py, z, c),
Iap = (double)at(ax, py, z, c),
Ip = Icp + 0.5f * (dx * (-Ipp + Inp) + dx * dx * (2 * Ipp - 5 * Icp + 4 * Inp - Iap) + dx * dx * dx * (-Ipp + 3 * Icp - 3 * Inp + Iap)),
Ipc = (double)at(px, y, z, c), Icc = (double)at(x, y, z, c), Inc = (double)at(nx, y, z, c),
Iac = (double)at(ax, y, z, c),
Ic = Icc + 0.5f * (dx * (-Ipc + Inc) + dx * dx * (2 * Ipc - 5 * Icc + 4 * Inc - Iac) + dx * dx * dx * (-Ipc + 3 * Icc - 3 * Inc + Iac)),
Ipn = (double)at(px, ny, z, c), Icn = (double)at(x, ny, z, c), Inn = (double)at(nx, ny, z, c),
Ian = (double)at(ax, ny, z, c),
In = Icn + 0.5f * (dx * (-Ipn + Inn) + dx * dx * (2 * Ipn - 5 * Icn + 4 * Inn - Ian) + dx * dx * dx * (-Ipn + 3 * Icn - 3 * Inn + Ian)),
Ipa = (double)at(px, ay, z, c), Ica = (double)at(x, ay, z, c), Ina = (double)at(nx, ay, z, c),
Iaa = (double)at(ax, ay, z, c),
Ia = Ica + 0.5f * (dx * (-Ipa + Ina) + dx * dx * (2 * Ipa - 5 * Ica + 4 * Ina - Iaa) + dx * dx * dx * (-Ipa + 3 * Ica - 3 * Ina + Iaa));
return Ic + 0.5f * (dy * (-Ip + In) + dy * dy * (2 * Ip - 5 * Ic + 4 * In - Ia) + dy * dy * dy * (-Ip + 3 * Ic - 3 * In + Ia));
}
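// cubic_atXY is a separable bicubic interpolation in Catmull-Rom form: each of the four rows
// py, y, ny, ay is interpolated horizontally with
// I = Ic + 0.5*(dx*(-Ip+In) + dx^2*(2*Ip-5*Ic+4*In-Ia) + dx^3*(-Ip+3*Ic-3*In+Ia)),
// and the resulting row values Ip, Ic, In, Ia are then blended vertically with the same cubic in dy.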
DeviceImage::DeviceImage(size_t width, size_t height, size_t depth, size_t spectrum)
{
_width = width;
_height = height;
_depth = depth;
_spectrum = spectrum;
hipMalloc(&_data, sizeof(unsigned char) * size());
}
DeviceImage::DeviceImage(const cimg_library::CImg<unsigned char>& image)
{
_width = image.width();
_height = image.height();
_depth = image.depth();
_spectrum = image.spectrum();
hipMalloc(&_data, sizeof(unsigned char) * image.size());
hipMemcpy(_data, image.data(), sizeof(unsigned char) * image.size(), hipMemcpyHostToDevice);
}
DeviceImage::~DeviceImage()
{
hipFree(_data);
}
|
5e7cb7ccca0365084c6a440a5a8e1b1ab7ebd508.cu
|
#include "Image.cuh"
const unsigned char* Image::data() const
{
return _data;
}
size_t Image::width() const
{
return _width;
}
size_t Image::height() const
{
return _height;
}
size_t Image::depth() const
{
return _depth;
}
size_t Image::spectrum() const
{
return _spectrum;
}
size_t Image::size() const
{
return _width * _height * _depth * _spectrum;
}
long Image::offset(const int& x, const int& y, const int& z, const int& c) const
{
return x + y * (long)_width + z * (long)(_width * _height) + c * (long)(_width * _height * _depth);
}
const unsigned char& Image::at(const int& x, const int& y, const int& z, const int& c) const
{
return _data[offset(x, y, z, c)];
}
unsigned char& Image::at(const int& x, const int& y, const int& z, const int& c)
{
return _data[offset(x, y, z, c)];
}
double Image::cubic_atXY(const double& fx, const double& fy, const int& z, const int& c) const
{
const double nfx = fx < 0 ? 0 : (fx > _width - 1 ? _width - 1 : fx);
const double nfy = fy < 0 ? 0 : (fy > _height - 1 ? _height - 1 : fy);
const int x = (int)nfx, y = (int)nfy;
const double dx = nfx - x, dy = nfy - y;
const int px = x - 1 < 0 ? 0 : x - 1, nx = dx > 0 ? x + 1 : x, ax = x + 2 >= _width ? _width - 1 : x + 2;
const int py = y - 1 < 0 ? 0 : y - 1, ny = dy > 0 ? y + 1 : y, ay = y + 2 >= _height ? _height - 1 : y + 2;
const double
Ipp = (double)at(px, py, z, c), Icp = (double)at(x, py, z, c), Inp = (double)at(nx, py, z, c),
Iap = (double)at(ax, py, z, c),
Ip = Icp + 0.5f * (dx * (-Ipp + Inp) + dx * dx * (2 * Ipp - 5 * Icp + 4 * Inp - Iap) + dx * dx * dx * (-Ipp + 3 * Icp - 3 * Inp + Iap)),
Ipc = (double)at(px, y, z, c), Icc = (double)at(x, y, z, c), Inc = (double)at(nx, y, z, c),
Iac = (double)at(ax, y, z, c),
Ic = Icc + 0.5f * (dx * (-Ipc + Inc) + dx * dx * (2 * Ipc - 5 * Icc + 4 * Inc - Iac) + dx * dx * dx * (-Ipc + 3 * Icc - 3 * Inc + Iac)),
Ipn = (double)at(px, ny, z, c), Icn = (double)at(x, ny, z, c), Inn = (double)at(nx, ny, z, c),
Ian = (double)at(ax, ny, z, c),
In = Icn + 0.5f * (dx * (-Ipn + Inn) + dx * dx * (2 * Ipn - 5 * Icn + 4 * Inn - Ian) + dx * dx * dx * (-Ipn + 3 * Icn - 3 * Inn + Ian)),
Ipa = (double)at(px, ay, z, c), Ica = (double)at(x, ay, z, c), Ina = (double)at(nx, ay, z, c),
Iaa = (double)at(ax, ay, z, c),
Ia = Ica + 0.5f * (dx * (-Ipa + Ina) + dx * dx * (2 * Ipa - 5 * Ica + 4 * Ina - Iaa) + dx * dx * dx * (-Ipa + 3 * Ica - 3 * Ina + Iaa));
return Ic + 0.5f * (dy * (-Ip + In) + dy * dy * (2 * Ip - 5 * Ic + 4 * In - Ia) + dy * dy * dy * (-Ip + 3 * Ic - 3 * In + Ia));
}
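// cubic_atXY is a separable bicubic interpolation in Catmull-Rom form: each of the four rows
// py, y, ny, ay is interpolated horizontally with
// I = Ic + 0.5*(dx*(-Ip+In) + dx^2*(2*Ip-5*Ic+4*In-Ia) + dx^3*(-Ip+3*Ic-3*In+Ia)),
// and the resulting row values Ip, Ic, In, Ia are then blended vertically with the same cubic in dy.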
DeviceImage::DeviceImage(size_t width, size_t height, size_t depth, size_t spectrum)
{
_width = width;
_height = height;
_depth = depth;
_spectrum = spectrum;
cudaMalloc(&_data, sizeof(unsigned char) * size());
}
DeviceImage::DeviceImage(const cimg_library::CImg<unsigned char>& image)
{
_width = image.width();
_height = image.height();
_depth = image.depth();
_spectrum = image.spectrum();
cudaMalloc(&_data, sizeof(unsigned char) * image.size());
cudaMemcpy(_data, image.data(), sizeof(unsigned char) * image.size(), cudaMemcpyHostToDevice);
}
DeviceImage::~DeviceImage()
{
cudaFree(_data);
}
|
be13653fdc7e72aede4a089001e64262c6d485cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include "orc_common.h"
#include "orc_gpu.h"
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
using cudf::detail::device_2dspan;
constexpr int scratch_buffer_size = 512 * 4;
// Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2
// Workaround replaces zero-length patch lists by a dummy zero patch
constexpr bool zero_pll_war = true;
static __device__ __constant__ int64_t kORCTimeToUTC =
1420070400; // Seconds from January 1st, 1970 to January 1st, 2015
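// Sanity check of the constant: 1970-01-01 to 2015-01-01 spans 45 years containing 11 leap
// days, i.e. 45 * 365 + 11 = 16436 days, and 16436 * 86400 s/day = 1,420,070,400 s.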
struct byterle_enc_state_s {
uint32_t literal_run;
uint32_t repeat_run;
volatile uint32_t rpt_map[(512 / 32) + 1];
};
struct intrle_enc_state_s {
uint32_t literal_run;
uint32_t delta_run;
uint32_t literal_mode;
uint32_t literal_w;
uint32_t hdr_bytes;
uint32_t pl_bytes;
volatile uint32_t delta_map[(512 / 32) + 1];
};
struct strdata_enc_state_s {
uint32_t char_count;
uint32_t lengths_red[(512 / 32)];
const char *str_data[512];
};
struct orcenc_state_s {
uint32_t cur_row; // Current row in group
uint32_t present_rows; // # of rows in present buffer
uint32_t present_out; // # of rows in present buffer that have been flushed
uint32_t nrows; // # of rows in current batch
uint32_t numvals; // # of non-zero values in current batch (<=nrows)
uint32_t numlengths; // # of non-zero values in DATA2 batch
uint32_t nnz; // Running count of non-null values
encoder_chunk_streams stream;
EncChunk chunk;
uint32_t strm_pos[CI_NUM_STREAMS];
uint8_t valid_buf[512]; // valid map bits
union {
byterle_enc_state_s byterle;
intrle_enc_state_s intrle;
strdata_enc_state_s strenc;
StripeDictionary dict_stripe;
} u;
union {
uint8_t u8[scratch_buffer_size]; // gblock_vminscratch buffer
uint32_t u32[scratch_buffer_size / 4];
} buf;
union {
uint8_t u8[2048];
uint32_t u32[1024];
int32_t i32[1024];
uint64_t u64[1024];
int64_t i64[1024];
} vals;
union {
uint8_t u8[2048];
uint32_t u32[1024];
uint64_t u64[1024];
} lengths;
};
static inline __device__ uint32_t zigzag(uint32_t v) { return v; }
static inline __device__ uint32_t zigzag(int32_t v)
{
int32_t s = (v >> 31);
return ((v ^ s) * 2) - s;
}
static inline __device__ uint64_t zigzag(uint64_t v) { return v; }
static inline __device__ uint64_t zigzag(int64_t v)
{
int64_t s = (v < 0) ? 1 : 0;
return ((v ^ -s) * 2) + s;
}
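// ZigZag maps signed integers to unsigned ones so that small magnitudes stay small:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
// e.g. in the int32_t overload, v = -2 gives s = -1, (v ^ s) = 1 and 1*2 - (-1) = 3.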
static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; }
static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; }
/**
* @brief Raw data output
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at
*streams[cid]+strm_pos[cid])
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] count number of bytes to encode
* @param[in] t thread id
*/
template <StreamIndexType cid, uint32_t inmask>
static __device__ void StoreBytes(
orcenc_state_s *s, const uint8_t *inbuf, uint32_t inpos, uint32_t count, int t)
{
uint8_t *dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
while (count > 0) {
uint32_t n = min(count, 512);
if (t < n) { dst[t] = inbuf[(inpos + t) & inmask]; }
dst += n;
inpos += n;
count -= n;
}
__syncthreads();
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
}
/**
* @brief ByteRLE encoder
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at
*streams[cid]+strm_pos[cid])
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
*
* @return number of input values encoded
*/
template <StreamIndexType cid, uint32_t inmask>
static __device__ uint32_t ByteRLE(
orcenc_state_s *s, const uint8_t *inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t)
{
uint8_t *dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
while (numvals > 0) {
uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
uint32_t rpt_map = ballot(t + 1 < numvals && v0 == v1), literal_run, repeat_run,
maxvals = min(numvals, 512);
if (!(t & 0x1f)) s->u.byterle.rpt_map[t >> 5] = rpt_map;
__syncthreads();
if (t == 0) {
// Find the start of an identical 3-byte sequence
// TBD: The two loops below could be eliminated using more ballot+ffs using warp0
literal_run = 0;
repeat_run = 0;
while (literal_run < maxvals) {
uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1];
uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1);
if (mask) {
uint32_t literal_run_ofs = __ffs(mask) - 1;
literal_run += literal_run_ofs;
repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1));
if (repeat_run + literal_run_ofs == 32) {
while (next == ~0) {
uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1;
next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0;
repeat_run += 32;
}
repeat_run += __ffs(~next) - 1;
}
repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals));
if (repeat_run < 3) {
literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0;
repeat_run = 0;
}
break;
}
rpt_map = next;
literal_run += 32;
}
if (repeat_run >= 130) {
// Limit large runs to multiples of 130
repeat_run = (repeat_run >= 3 * 130) ? 3 * 130 : (repeat_run >= 2 * 130) ? 2 * 130 : 130;
} else if (literal_run && literal_run + repeat_run == maxvals) {
repeat_run = 0; // Try again at next iteration
}
s->u.byterle.repeat_run = repeat_run;
s->u.byterle.literal_run = min(literal_run, maxvals);
}
__syncthreads();
literal_run = s->u.byterle.literal_run;
if (!flush && literal_run == numvals) {
literal_run &= ~0x7f;
if (!literal_run) break;
}
if (literal_run > 0) {
uint32_t num_runs = (literal_run + 0x7f) >> 7;
if (t < literal_run) {
uint32_t run_id = t >> 7;
uint32_t run = min(literal_run - run_id * 128, 128);
if (!(t & 0x7f)) dst[run_id + t] = 0x100 - run;
dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += num_runs + literal_run;
out_cnt += literal_run;
numvals -= literal_run;
inpos += literal_run;
}
repeat_run = s->u.byterle.repeat_run;
if (repeat_run > 0) {
while (repeat_run >= 130) {
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = 0x7f;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += 130;
numvals -= 130;
inpos += 130;
repeat_run -= 130;
}
if (!flush && repeat_run == numvals) {
// Wait for more data in case we can continue the run later
break;
}
if (repeat_run >= 3) {
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = repeat_run - 3;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += repeat_run;
numvals -= repeat_run;
inpos += repeat_run;
}
}
}
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
__syncthreads();
return out_cnt;
}
/**
* @brief Maps the symbol size in bytes to RLEv2 5-bit length code
*/
static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] = {
0, 7, 15, 23, 27, 28, 29, 30, 31};
/**
* @brief Encode a varint value, return the number of bytes written
*/
static inline __device__ uint32_t StoreVarint(uint8_t *dst, uint64_t v)
{
uint32_t bytecnt = 0;
for (;;) {
uint32_t c = (uint32_t)(v & 0x7f);
v >>= 7u;
if (v == 0) {
dst[bytecnt++] = c;
break;
} else {
dst[bytecnt++] = c + 0x80;
}
}
return bytecnt;
}
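// Example: StoreVarint(dst, 300) writes {0xAC, 0x02} and returns 2 -- base-128 (LEB128-style)
// encoding with the least significant 7-bit group first and the high bit set on every byte
// except the last.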
template <class T>
static inline __device__ void StoreBytesBigEndian(uint8_t *dst, T v, uint32_t w)
{
for (uint32_t i = 0, b = w * 8; i < w; ++i) {
b -= 8;
dst[i] = static_cast<uint8_t>(v >> b);
}
}
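// Example: StoreBytesBigEndian(dst, 0x1234, 2) writes {0x12, 0x34}, i.e. most significant
// byte first, which is the byte order used for RLEv2 base and literal values.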
// Combine and store bits for symbol widths less than 8
static inline __device__ void StoreBitsBigEndian(
uint8_t *dst, uint32_t v, uint32_t w, int num_vals, int t)
{
if (t <= (num_vals | 0x1f)) {
uint32_t mask;
if (w <= 1) {
v = (v << 1) | (shuffle_xor(v, 1) & 0x1);
v = (v << 2) | (shuffle_xor(v, 2) & 0x3);
v = (v << 4) | (shuffle_xor(v, 4) & 0xf);
mask = 0x7;
} else if (w <= 2) {
v = (v << 2) | (shuffle_xor(v, 1) & 0x3);
v = (v << 4) | (shuffle_xor(v, 2) & 0xf);
mask = 0x3;
} else // if (w <= 4)
{
v = (v << 4) | (shuffle_xor(v, 1) & 0xf);
mask = 0x1;
}
if (t < num_vals && !(t & mask)) { dst[(t * w) >> 3] = static_cast<uint8_t>(v); }
}
}
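// The shuffles above gather 8/w consecutive lane values into one byte (first value in the most
// significant bits); only the first lane of each 8/w-lane group performs the store, so every
// output byte is written exactly once.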
/**
* @brief Integer RLEv2 encoder
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at
*streams[cid]+strm_pos[cid])
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to perform block reduce
*
* @return number of input values encoded
*/
template <StreamIndexType cid,
class T,
bool is_signed,
uint32_t inmask,
int block_size,
typename Storage>
static __device__ uint32_t IntegerRLE(orcenc_state_s *s,
const T *inbuf,
uint32_t inpos,
uint32_t numvals,
uint32_t flush,
int t,
Storage &temp_storage)
{
using block_reduce = hipcub::BlockReduce<T, block_size>;
uint8_t *dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
__shared__ volatile uint64_t block_vmin;
while (numvals > 0) {
T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0;
uint32_t delta_map = ballot(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512),
literal_run, delta_run;
if (!(t & 0x1f)) s->u.intrle.delta_map[t >> 5] = delta_map;
__syncthreads();
if (!t) {
// Find the start of the next delta run (2 consecutive values with the same delta)
literal_run = delta_run = 0;
while (literal_run < maxvals) {
if (delta_map != 0) {
uint32_t literal_run_ofs = __ffs(delta_map) - 1;
literal_run += literal_run_ofs;
delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1));
if (literal_run_ofs + delta_run == 32) {
for (;;) {
uint32_t delta_idx = (literal_run + delta_run) >> 5;
delta_map = (delta_idx < 512 / 32) ? s->u.intrle.delta_map[delta_idx] : 0;
if (delta_map != ~0) break;
delta_run += 32;
}
delta_run += __ffs(~delta_map) - 1;
}
delta_run += 2;
break;
}
literal_run += 32;
delta_map = s->u.intrle.delta_map[(literal_run >> 5)];
}
literal_run = min(literal_run, maxvals);
s->u.intrle.literal_run = literal_run;
s->u.intrle.delta_run = min(delta_run, maxvals - literal_run);
}
__syncthreads();
literal_run = s->u.intrle.literal_run;
// Find minimum and maximum values
if (literal_run > 0) {
// Find min & max
T vmin = (t < literal_run) ? v0 : std::numeric_limits<T>::max();
T vmax = (t < literal_run) ? v0 : std::numeric_limits<T>::min();
uint32_t literal_mode, literal_w;
vmin = block_reduce(temp_storage).Reduce(vmin, hipcub::Min());
__syncthreads();
vmax = block_reduce(temp_storage).Reduce(vmax, hipcub::Max());
if (t == 0) {
uint32_t mode1_w, mode2_w;
typename std::make_unsigned<T>::type vrange_mode1, vrange_mode2;
block_vmin = static_cast<uint64_t>(vmin);
if (sizeof(T) > 4) {
vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7);
mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7);
} else {
vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3);
mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3);
}
// Decide between mode1 & mode2 (also mode3 for length=2 repeat)
if (vrange_mode2 == 0 && mode1_w > 1) {
// Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >=
// 3)
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
bytecnt += StoreVarint(dst + 2, vrange_mode1);
dst[bytecnt++] = 0; // Zero delta
s->u.intrle.literal_mode = 3;
s->u.intrle.literal_w = bytecnt;
} else {
uint32_t range, w;
if (mode1_w > mode2_w && (literal_run - 1) * (mode1_w - mode2_w) > 4) {
s->u.intrle.literal_mode = 2;
w = mode2_w;
range = (uint32_t)vrange_mode2;
} else {
s->u.intrle.literal_mode = 1;
w = mode1_w;
range = (uint32_t)vrange_mode1;
}
if (w == 1)
w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 2 : 1;
else
w <<= 3; // bytes -> bits
s->u.intrle.literal_w = w;
}
}
__syncthreads();
vmin = static_cast<T>(block_vmin);
literal_mode = s->u.intrle.literal_mode;
literal_w = s->u.intrle.literal_w;
if (literal_mode == 1) {
// Direct mode
if (!t) {
dst[0] = 0x40 +
((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 +
((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
}
dst += 2;
typename std::make_unsigned<T>::type zzv0 = v0;
if (t < literal_run) { zzv0 = zigzag(v0); }
if (literal_w < 8) {
StoreBitsBigEndian(dst, zzv0, literal_w, literal_run, t);
} else if (t < literal_run) {
StoreBytesBigEndian(dst + t * (literal_w >> 3), zzv0, (literal_w >> 3));
}
} else if (literal_mode == 2) {
// Patched base mode
if (!t) {
uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1;
vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin;
bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7))
: (4 - min(CountLeadingBytes32(vmax << bv_scale), 3));
if (zero_pll_war) {
// Insert a dummy zero patch
pll = 1;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0;
} else {
pll = 0;
}
dst[0] = 0x80 +
((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 +
((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw];
dst[3] = ((pgw - 1) << 5) | pll;
if (is_signed) {
vmax >>= 1;
vmax |= vmin & ((T)1 << (bw * 8 - 1));
}
StoreBytesBigEndian(dst + 4, vmax, bw);
s->u.intrle.hdr_bytes = 4 + bw;
s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3;
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
v0 -= (t < literal_run) ? vmin : 0;
if (literal_w < 8)
StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t);
else if (t < literal_run)
StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3));
dst += s->u.intrle.pl_bytes;
} else {
// Delta mode
dst += literal_w;
literal_w = 0;
}
dst += (literal_run * literal_w + 7) >> 3;
numvals -= literal_run;
inpos += literal_run;
out_cnt += literal_run;
__syncthreads();
}
delta_run = s->u.intrle.delta_run;
if (delta_run > 0) {
if (t == literal_run) {
int64_t delta = (int64_t)v1 - (int64_t)v0;
uint64_t delta_base = zigzag(v0);
if (delta == 0 && delta_run >= 3 && delta_run <= 10) {
// Short repeat
uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7);
dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3);
for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++) {
b -= 8;
dst[1 + i] = static_cast<uint8_t>(delta_base >> b);
}
s->u.intrle.hdr_bytes = 1 + delta_bw;
} else {
// Delta
uint64_t delta_u = zigzag(delta);
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((delta_run - 1) >> 8);
dst[1] = (delta_run - 1) & 0xff;
bytecnt += StoreVarint(dst + bytecnt, delta_base);
bytecnt += StoreVarint(dst + bytecnt, delta_u);
s->u.intrle.hdr_bytes = bytecnt;
}
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
numvals -= delta_run;
inpos += delta_run;
out_cnt += delta_run;
}
}
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
__syncthreads();
return out_cnt;
}
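// For reference, the top two bits of the first header byte written above select the RLEv2
// sub-encoding: 0x00 short repeat, 0x40 direct, 0x80 patched base and 0xC0 delta -- matching
// the literal_mode 1/2/3 and delta paths handled in the loop.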
/**
* @brief Store a group of strings as a single concatenated string
*
* @param[in] dst destination buffer
* @param[in] strenc string encoder state
 * @param[in] len string length (per thread)
* @param[in] t thread id
*/
static __device__ void StoreStringData(uint8_t *dst,
strdata_enc_state_s *strenc,
uint32_t len,
int t)
{
// Start with summing up all the lengths
uint32_t pos = len;
uint32_t wt = t & 0x1f;
for (uint32_t n = 1; n < 32; n <<= 1) {
uint32_t tmp = shuffle(pos, (wt & ~n) | (n - 1));
pos += (wt & n) ? tmp : 0;
}
if (wt == 0x1f) { strenc->lengths_red[t >> 5] = pos; }
dst += pos - len;
__syncthreads();
if (t < 32) {
uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0;
uint32_t wpos = wlen;
for (uint32_t n = 1; n < 16; n <<= 1) {
uint32_t tmp = shuffle(wpos, (wt & ~n) | (n - 1));
wpos += (wt & n) ? tmp : 0;
}
if (wt < 16) { strenc->lengths_red[wt] = wpos - wlen; }
if (wt == 0xf) {
strenc->char_count = wpos; // Update stream position
}
}
__syncthreads();
  // TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive characters at a time
  // rather than have each thread do a memcpy
if (len > 0) { memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len); }
}
/**
* @brief In-place conversion from lengths to positions
*
* @param[in] vals input values
* @param[in] numvals number of values
* @param[in] t thread id
*/
template <class T>
inline __device__ void lengths_to_positions(volatile T *vals, uint32_t numvals, unsigned int t)
{
for (uint32_t n = 1; n < numvals; n <<= 1) {
__syncthreads();
if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)];
}
}
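// Example: for lengths {2, 3, 1, 4} this yields the inclusive prefix sums {2, 5, 6, 10}
// (a Hillis-Steele style scan; the encoder invokes it with numvals == 512).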
/**
* @brief Timestamp scale table (powers of 10)
*/
static const __device__ __constant__ int32_t kTimeScale[10] = {
1000000000, 100000000, 10000000, 1000000, 100000, 10000, 1000, 100, 10, 1};
/**
* @brief Encode column data
*
* @param[in] chunks encoder chunks device array [column][rowgroup]
 * @param[in,out] streams chunk streams device array [column][rowgroup]
*/
// blockDim {512,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuEncodeOrcColumnData(device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) orcenc_state_s state_g;
__shared__ union {
typename hipcub::BlockReduce<int32_t, block_size>::TempStorage i32;
typename hipcub::BlockReduce<int64_t, block_size>::TempStorage i64;
typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage u32;
typename hipcub::BlockReduce<uint64_t, block_size>::TempStorage u64;
} temp_storage;
orcenc_state_s *const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[col_id][group_id];
s->stream = streams[col_id][group_id];
}
if (t < CI_NUM_STREAMS) { s->strm_pos[t] = 0; }
__syncthreads();
if (!t) {
s->cur_row = 0;
s->present_rows = 0;
s->present_out = 0;
s->numvals = 0;
s->numlengths = 0;
s->nnz = 0;
// Dictionary data is encoded in a separate kernel
if (s->chunk.encoding_kind == DICTIONARY_V2) {
s->strm_pos[CI_DATA2] = s->stream.lengths[CI_DATA2];
s->strm_pos[CI_DICTIONARY] = s->stream.lengths[CI_DICTIONARY];
}
}
__syncthreads();
while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0) {
// Encode valid map
if (s->present_rows < s->chunk.num_rows) {
uint32_t present_rows = s->present_rows;
uint32_t nrows = min(s->chunk.num_rows - present_rows,
512 * 8 - (present_rows - (min(s->cur_row, s->present_out) & ~7)));
uint32_t nrows_out;
if (t * 8 < nrows) {
uint32_t row = s->chunk.start_row + present_rows + t * 8;
uint8_t valid = 0;
if (row < s->chunk.leaf_column->size()) {
if (s->chunk.leaf_column->nullable()) {
size_type current_valid_offset = row + s->chunk.leaf_column->offset();
size_type next_valid_offset =
current_valid_offset + min(32, s->chunk.leaf_column->size());
bitmask_type mask = cudf::detail::get_mask_offset_word(
s->chunk.leaf_column->null_mask(), 0, current_valid_offset, next_valid_offset);
valid = 0xff & mask;
} else {
valid = 0xff;
}
if (row + 7 > s->chunk.leaf_column->size()) {
valid = valid & ((1 << (s->chunk.leaf_column->size() & 7)) - 1);
}
}
s->valid_buf[(row >> 3) & 0x1ff] = valid;
}
__syncthreads();
present_rows += nrows;
if (!t) { s->present_rows = present_rows; }
// RLE encode the present stream
nrows_out =
present_rows -
s->present_out; // Should always be a multiple of 8 except at the end of the last row group
if (nrows_out > ((present_rows < s->chunk.num_rows) ? 130 * 8 : 0)) {
uint32_t present_out = s->present_out;
if (s->stream.ids[CI_PRESENT] >= 0) {
uint32_t flush = (present_rows < s->chunk.num_rows) ? 0 : 7;
nrows_out = (nrows_out + flush) >> 3;
nrows_out =
ByteRLE<CI_PRESENT, 0x1ff>(
s, s->valid_buf, (s->chunk.start_row + present_out) >> 3, nrows_out, flush, t) *
8;
}
__syncthreads();
if (!t) { s->present_out = min(present_out + nrows_out, present_rows); }
}
__syncthreads();
}
// Fetch non-null values
if (!s->stream.data_ptrs[CI_DATA]) {
// Pass-through
__syncthreads();
if (!t) {
s->cur_row = s->present_rows;
s->strm_pos[CI_DATA] = s->cur_row * s->chunk.dtype_len;
}
__syncthreads();
} else if (s->cur_row < s->present_rows) {
uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024;
uint32_t nrows =
min(min(s->present_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)), 512);
uint32_t row = s->chunk.start_row + s->cur_row + t;
uint32_t valid = (t < nrows) ? (s->valid_buf[(row >> 3) & 0x1ff] >> (row & 7)) & 1 : 0;
s->buf.u32[t] = valid;
// TODO: Could use a faster reduction relying on _popc() for the initial phase
lengths_to_positions(s->buf.u32, 512, t);
__syncthreads();
if (valid) {
int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1);
switch (s->chunk.type_kind) {
case INT:
case DATE:
case FLOAT: s->vals.u32[nz_idx] = s->chunk.leaf_column->element<uint32_t>(row); break;
case DOUBLE:
case LONG: s->vals.u64[nz_idx] = s->chunk.leaf_column->element<uint64_t>(row); break;
case SHORT: s->vals.u32[nz_idx] = s->chunk.leaf_column->element<uint16_t>(row); break;
case BOOLEAN:
case BYTE: s->vals.u8[nz_idx] = s->chunk.leaf_column->element<uint8_t>(row); break;
case TIMESTAMP: {
int64_t ts = s->chunk.leaf_column->element<int64_t>(row);
int32_t ts_scale = kTimeScale[min(s->chunk.scale, 9)];
int64_t seconds = ts / ts_scale;
int64_t nanos = (ts - seconds * ts_scale);
// There is a bug in the ORC spec such that for negative timestamps, it is understood
// between the writer and reader that nanos will be adjusted to their positive component
// but the negative seconds will be left alone. This means that -2.6 is encoded as
// seconds = -2 and nanos = 1+(-0.6) = 0.4
// This leads to an error in decoding time where -1 < time (s) < 0
// Details: https://github.com/rapidsai/cudf/pull/5529#issuecomment-648768925
if (nanos < 0) { nanos += ts_scale; }
s->vals.i64[nz_idx] = seconds - kORCTimeToUTC;
if (nanos != 0) {
// Trailing zeroes are encoded in the lower 3-bits
uint32_t zeroes = 0;
nanos *= kTimeScale[9 - min(s->chunk.scale, 9)];
if (!(nanos % 100)) {
nanos /= 100;
zeroes = 1;
while (zeroes < 7 && !(nanos % 10)) {
nanos /= 10;
zeroes++;
}
}
nanos = (nanos << 3) + zeroes;
}
s->lengths.u64[nz_idx] = nanos;
break;
}
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
uint32_t dict_idx = s->chunk.dict_index[row];
if (dict_idx > 0x7fffffffu) dict_idx = s->chunk.dict_index[dict_idx & 0x7fffffffu];
s->vals.u32[nz_idx] = dict_idx;
} else {
string_view value = s->chunk.leaf_column->element<string_view>(row);
s->u.strenc.str_data[s->buf.u32[t] - 1] = value.data();
s->lengths.u32[nz_idx] = value.size_bytes();
}
break;
default: break;
}
}
__syncthreads();
if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2) {
// Store string data
uint32_t nz = s->buf.u32[511];
uint32_t nz_idx = (s->nnz + t) & 0x3ff;
uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0;
StoreStringData(s->stream.data_ptrs[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t);
if (!t) { s->strm_pos[CI_DATA] += s->u.strenc.char_count; }
__syncthreads();
} else if (s->chunk.type_kind == BOOLEAN) {
// bool8 -> 8x bool1
uint32_t nz = s->buf.u32[511];
uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3;
if (t < n) {
uint32_t idx8 = (s->nnz & ~7) + (t << 3);
s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7) |
((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6) |
((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5) |
((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4) |
((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3) |
((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2) |
((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1) |
((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0);
}
__syncthreads();
}
if (!t) {
uint32_t nz = s->buf.u32[511];
s->nnz += nz;
s->numvals += nz;
s->numlengths += (s->chunk.type_kind == TIMESTAMP ||
(s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2))
? nz
: 0;
s->cur_row += nrows;
}
__syncthreads();
// Encode values
if (s->numvals > 0) {
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 7 : 0, n;
switch (s->chunk.type_kind) {
case SHORT:
case INT:
case DATE:
n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff, block_size>(
s, s->vals.i32, s->nnz - s->numvals, s->numvals, flush, t, temp_storage.i32);
break;
case LONG:
case TIMESTAMP:
n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff, block_size>(
s, s->vals.i64, s->nnz - s->numvals, s->numvals, flush, t, temp_storage.i64);
break;
case BYTE:
n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t);
break;
case BOOLEAN:
n = ByteRLE<CI_DATA, 0x1ff>(s,
s->lengths.u8,
(s->nnz - s->numvals + flush) >> 3,
(s->numvals + flush) >> 3,
flush,
t) *
8;
break;
case FLOAT:
StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t);
n = s->numvals;
break;
case DOUBLE:
StoreBytes<CI_DATA, 0x1fff>(
s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t);
n = s->numvals;
break;
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff, block_size>(
s, s->vals.u32, s->nnz - s->numvals, s->numvals, flush, t, temp_storage.u32);
} else {
n = s->numvals;
}
break;
default: n = s->numvals; break;
}
__syncthreads();
if (!t) { s->numvals -= min(n, s->numvals); }
}
// Encode secondary stream values
if (s->numlengths > 0) {
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 1 : 0, n;
switch (s->chunk.type_kind) {
case TIMESTAMP:
n = IntegerRLE<CI_DATA2, uint64_t, false, 0x3ff, block_size>(
s, s->lengths.u64, s->nnz - s->numlengths, s->numlengths, flush, t, temp_storage.u64);
break;
case STRING:
n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>(
s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, flush, t, temp_storage.u32);
break;
default: n = s->numlengths; break;
}
__syncthreads();
if (!t) { s->numlengths -= min(n, s->numlengths); }
}
}
__syncthreads();
}
__syncthreads();
if (t <= CI_PRESENT && s->stream.ids[t] >= 0) {
// Update actual compressed length
streams[col_id][group_id].lengths[t] = s->strm_pos[t];
if (!s->stream.data_ptrs[t]) {
streams[col_id][group_id].data_ptrs[t] =
static_cast<uint8_t *>(const_cast<void *>(s->chunk.leaf_column->head())) +
(s->chunk.leaf_column->offset() + s->chunk.start_row) * s->chunk.dtype_len;
}
}
}
/**
* @brief Encode column dictionaries
*
* @param[in] stripes Stripe dictionaries device array [stripe][string_column]
 * @param[in] chunks EncChunk device array [column][rowgroup]
 * @param[in] streams encoder chunk streams device array [column][rowgroup]
*/
// blockDim {512,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuEncodeStringDictionaries(StripeDictionary *stripes,
device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) orcenc_state_s state_g;
__shared__ typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage;
orcenc_state_s *const s = &state_g;
uint32_t stripe_id = blockIdx.x;
uint32_t cid = (blockIdx.y) ? CI_DICTIONARY : CI_DATA2;
int t = threadIdx.x;
if (t == 0) s->u.dict_stripe = stripes[stripe_id];
__syncthreads();
auto const strm_ptr = &streams[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk];
if (t == 0) {
s->chunk = chunks[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk];
s->stream = *strm_ptr;
s->strm_pos[cid] = 0;
s->numlengths = 0;
s->nrows = s->u.dict_stripe.num_strings;
s->cur_row = 0;
}
column_device_view *string_column = s->u.dict_stripe.leaf_column;
auto const dict_data = s->u.dict_stripe.dict_data;
__syncthreads();
if (s->chunk.encoding_kind != DICTIONARY_V2) {
return; // This column isn't using dictionary encoding -> bail out
}
while (s->cur_row < s->nrows || s->numlengths != 0) {
uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512));
uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0;
if (cid == CI_DICTIONARY) {
// Encoding string contents
const char *ptr = 0;
uint32_t count = 0;
if (t < numvals) {
auto string_val = string_column->element<string_view>(string_idx);
ptr = string_val.data();
count = string_val.size_bytes();
}
s->u.strenc.str_data[t] = ptr;
StoreStringData(s->stream.data_ptrs[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY],
&s->u.strenc,
(ptr) ? count : 0,
t);
if (!t) { s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count; }
} else {
// Encoding string lengths
uint32_t count =
(t < numvals)
? static_cast<uint32_t>(string_column->element<string_view>(string_idx).size_bytes())
: 0;
uint32_t nz_idx = (s->cur_row + t) & 0x3ff;
if (t < numvals) s->lengths.u32[nz_idx] = count;
__syncthreads();
if (s->numlengths + numvals > 0) {
uint32_t flush = (s->cur_row + numvals == s->nrows) ? 1 : 0;
uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>(
s, s->lengths.u32, s->cur_row, s->numlengths + numvals, flush, t, temp_storage);
__syncthreads();
if (!t) {
s->numlengths += numvals;
s->numlengths -= min(n, s->numlengths);
}
}
}
if (t == 0) { s->cur_row += numvals; }
__syncthreads();
}
if (t == 0) { strm_ptr->lengths[cid] = s->strm_pos[cid]; }
}
__global__ void __launch_bounds__(512)
gpu_set_chunk_columns(const table_device_view view, device_2dspan<EncChunk> chunks)
{
// Set leaf_column member of EncChunk
for (size_type i = threadIdx.x; i < chunks.size().second; i += blockDim.x) {
chunks[blockIdx.x][i].leaf_column = view.begin() + blockIdx.x;
}
}
/**
* @brief Merge chunked column data into a single contiguous stream
*
 * @param[in,out] strm_desc StripeStream device array [stripe][stream]
 * @param[in,out] streams encoder chunk streams device array [column][rowgroup]
*/
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactOrcDataStreams(device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) StripeStream ss;
__shared__ __align__(16) encoder_chunk_streams strm0;
__shared__ uint8_t *volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
if (t == 0) {
ss = strm_desc[stripe_id][stream_id];
strm0 = streams[ss.column_id][ss.first_chunk_id];
}
__syncthreads();
auto const cid = ss.stream_type;
auto dst_ptr = strm0.data_ptrs[cid] + strm0.lengths[cid];
for (auto group = ss.first_chunk_id + 1; group < ss.first_chunk_id + ss.num_chunks; ++group) {
uint8_t *src_ptr;
uint32_t len;
if (t == 0) {
src_ptr = streams[ss.column_id][group].data_ptrs[cid];
len = streams[ss.column_id][group].lengths[cid];
if (src_ptr != dst_ptr) { streams[ss.column_id][group].data_ptrs[cid] = dst_ptr; }
ck_curptr_g = src_ptr;
ck_curlen_g = len;
}
__syncthreads();
src_ptr = ck_curptr_g;
len = ck_curlen_g;
if (len > 0 && src_ptr != dst_ptr) {
for (uint32_t i = 0; i < len; i += 1024) {
uint8_t v = (i + t < len) ? src_ptr[i + t] : 0;
__syncthreads();
if (i + t < len) { dst_ptr[i + t] = v; }
}
}
dst_ptr += len;
__syncthreads();
}
if (!t) { strm_desc[stripe_id][stream_id].stream_size = dst_ptr - strm0.data_ptrs[cid]; }
}
/**
* @brief Initializes compression input/output structures
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
 * @param[in] streams encoder chunk streams device array [column][rowgroup]
* @param[out] comp_in Per-block compression input parameters
* @param[out] comp_out Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
*/
// blockDim {256,1,1}
__global__ void __launch_bounds__(256)
gpuInitCompressionBlocks(device_2dspan<StripeStream const> strm_desc,
device_2dspan<encoder_chunk_streams> streams, // const?
gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out,
uint8_t *compressed_bfr,
uint32_t comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ uint8_t *volatile uncomp_base_g;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
uint32_t num_blocks;
uint8_t *src, *dst;
if (t == 0) {
ss = strm_desc[stripe_id][stream_id];
uncomp_base_g = streams[ss.column_id][ss.first_chunk_id].data_ptrs[ss.stream_type];
}
__syncthreads();
src = uncomp_base_g;
dst = compressed_bfr + ss.bfr_offset;
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 1;
for (uint32_t b = t; b < num_blocks; b += 256) {
gpu_inflate_input_s *blk_in = &comp_in[ss.first_block + b];
gpu_inflate_status_s *blk_out = &comp_out[ss.first_block + b];
uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
blk_in->srcDevice = src + b * comp_blk_size;
blk_in->srcSize = blk_size;
blk_in->dstDevice = dst + b * (3 + comp_blk_size) + 3; // reserve 3 bytes for block header
blk_in->dstSize = blk_size;
blk_out->bytes_written = blk_size;
blk_out->status = 1;
blk_out->reserved = 0;
}
}
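// Each block's destination above is laid out at a stride of (comp_blk_size + 3) bytes, with
// 3 bytes reserved in front of every block for its header; gpuCompactCompressedBlocks later
// fills in the headers and packs the blocks together.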
/**
 * @brief Compacts compressed blocks in a single contiguous stream, and updates the 3-byte block length
*fields
*
* @param[in,out] strm_desc StripeStream device array [stripe][stream]
* @param[in] comp_in Per-block compression input parameters
* @param[in] comp_out Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
*/
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactCompressedBlocks(device_2dspan<StripeStream> strm_desc,
gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out,
uint8_t *compressed_bfr,
uint32_t comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ const uint8_t *volatile comp_src_g;
__shared__ uint32_t volatile comp_len_g;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
uint32_t num_blocks, b, blk_size;
const uint8_t *src;
uint8_t *dst;
if (t == 0) ss = strm_desc[stripe_id][stream_id];
__syncthreads();
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 0;
dst = compressed_bfr + ss.bfr_offset;
b = 0;
do {
if (t == 0) {
gpu_inflate_input_s *blk_in = &comp_in[ss.first_block + b];
gpu_inflate_status_s *blk_out = &comp_out[ss.first_block + b];
uint32_t src_len =
min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
uint32_t dst_len = (blk_out->status == 0) ? blk_out->bytes_written : src_len;
uint32_t blk_size24;
if (dst_len >= src_len) {
// Copy from uncompressed source
src = static_cast<const uint8_t *>(blk_in->srcDevice);
blk_out->bytes_written = src_len;
dst_len = src_len;
blk_size24 = dst_len * 2 + 1;
} else {
// Compressed block
src = static_cast<const uint8_t *>(blk_in->dstDevice);
blk_size24 = dst_len * 2 + 0;
}
dst[0] = static_cast<uint8_t>(blk_size24 >> 0);
dst[1] = static_cast<uint8_t>(blk_size24 >> 8);
dst[2] = static_cast<uint8_t>(blk_size24 >> 16);
comp_src_g = src;
comp_len_g = dst_len;
}
__syncthreads();
src = comp_src_g;
blk_size = comp_len_g;
dst += 3; // skip over length written by thread0
if (src != dst) {
for (uint32_t i = 0; i < blk_size; i += 1024) {
uint8_t v = (i + t < blk_size) ? src[i + t] : 0;
__syncthreads();
if (i + t < blk_size) { dst[i + t] = v; }
}
}
dst += blk_size;
__syncthreads();
} while (++b < num_blocks);
// Update stripe stream with the compressed size
if (t == 0) {
strm_desc[stripe_id][stream_id].stream_size =
static_cast<uint32_t>(dst - (compressed_bfr + ss.bfr_offset));
}
}
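// The 3-byte header written per block above holds (block_length << 1) in little-endian order,
// with the least significant bit set when the block is stored uncompressed (i.e. the original
// bytes were copied because compression did not shrink them).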
void EncodeOrcColumnData(device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(512, 1); // 512 threads per chunk
dim3 dim_grid(chunks.size().first, chunks.size().second);
hipLaunchKernelGGL(( gpuEncodeOrcColumnData<512>), dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, streams);
}
void EncodeStripeDictionaries(StripeDictionary *stripes,
device_2dspan<EncChunk const> chunks,
uint32_t num_string_columns,
uint32_t num_stripes,
device_2dspan<encoder_chunk_streams> enc_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(512, 1); // 512 threads per dictionary
dim3 dim_grid(num_string_columns * num_stripes, 2);
hipLaunchKernelGGL(( gpuEncodeStringDictionaries<512>)
, dim3(dim_grid), dim3(dim_block), 0, stream.value(), stripes, chunks, enc_streams);
}
void set_chunk_columns(const table_device_view &view,
device_2dspan<EncChunk> chunks,
rmm::cuda_stream_view stream)
{
dim3 dim_block(512, 1);
dim3 dim_grid(chunks.size().first, 1);
hipLaunchKernelGGL(( gpu_set_chunk_columns), dim3(dim_grid), dim3(dim_block), 0, stream.value(), view, chunks);
}
void CompactOrcDataStreams(device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> enc_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(1024, 1);
dim3 dim_grid(strm_desc.size().first, strm_desc.size().second);
hipLaunchKernelGGL(( gpuCompactOrcDataStreams), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_desc, enc_streams);
}
void CompressOrcDataStreams(uint8_t *compressed_data,
uint32_t num_compressed_blocks,
CompressionKind compression,
uint32_t comp_blk_size,
device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> enc_streams,
gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out,
rmm::cuda_stream_view stream)
{
dim3 dim_block_init(256, 1);
dim3 dim_grid(strm_desc.size().first, strm_desc.size().second);
hipLaunchKernelGGL(( gpuInitCompressionBlocks), dim3(dim_grid), dim3(dim_block_init), 0, stream.value(),
strm_desc, enc_streams, comp_in, comp_out, compressed_data, comp_blk_size);
if (compression == SNAPPY) { gpu_snap(comp_in, comp_out, num_compressed_blocks, stream); }
dim3 dim_block_compact(1024, 1);
hipLaunchKernelGGL(( gpuCompactCompressedBlocks), dim3(dim_grid), dim3(dim_block_compact), 0, stream.value(),
strm_desc, comp_in, comp_out, compressed_data, comp_blk_size);
}
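// The three steps above run back to back on the same stream: gpuInitCompressionBlocks carves
// each stripe stream into comp_blk_size chunks, gpu_snap (SNAPPY only) compresses them, and
// gpuCompactCompressedBlocks writes the block headers and packs the results contiguously.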
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
be13653fdc7e72aede4a089001e64262c6d485cb.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <cudf/column/column_device_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include "orc_common.h"
#include "orc_gpu.h"
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
using cudf::detail::device_2dspan;
constexpr int scratch_buffer_size = 512 * 4;
// Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2
// Workaround replaces zero-length patch lists by a dummy zero patch
constexpr bool zero_pll_war = true;
static __device__ __constant__ int64_t kORCTimeToUTC =
1420070400; // Seconds from January 1st, 1970 to January 1st, 2015
struct byterle_enc_state_s {
uint32_t literal_run;
uint32_t repeat_run;
volatile uint32_t rpt_map[(512 / 32) + 1];
};
struct intrle_enc_state_s {
uint32_t literal_run;
uint32_t delta_run;
uint32_t literal_mode;
uint32_t literal_w;
uint32_t hdr_bytes;
uint32_t pl_bytes;
volatile uint32_t delta_map[(512 / 32) + 1];
};
struct strdata_enc_state_s {
uint32_t char_count;
uint32_t lengths_red[(512 / 32)];
const char *str_data[512];
};
struct orcenc_state_s {
uint32_t cur_row; // Current row in group
uint32_t present_rows; // # of rows in present buffer
uint32_t present_out; // # of rows in present buffer that have been flushed
uint32_t nrows; // # of rows in current batch
uint32_t numvals; // # of non-zero values in current batch (<=nrows)
uint32_t numlengths; // # of non-zero values in DATA2 batch
uint32_t nnz; // Running count of non-null values
encoder_chunk_streams stream;
EncChunk chunk;
uint32_t strm_pos[CI_NUM_STREAMS];
uint8_t valid_buf[512]; // valid map bits
union {
byterle_enc_state_s byterle;
intrle_enc_state_s intrle;
strdata_enc_state_s strenc;
StripeDictionary dict_stripe;
} u;
union {
    uint8_t u8[scratch_buffer_size];  // scratch buffer
uint32_t u32[scratch_buffer_size / 4];
} buf;
union {
uint8_t u8[2048];
uint32_t u32[1024];
int32_t i32[1024];
uint64_t u64[1024];
int64_t i64[1024];
} vals;
union {
uint8_t u8[2048];
uint32_t u32[1024];
uint64_t u64[1024];
} lengths;
};
static inline __device__ uint32_t zigzag(uint32_t v) { return v; }
static inline __device__ uint32_t zigzag(int32_t v)
{
int32_t s = (v >> 31);
return ((v ^ s) * 2) - s;
}
static inline __device__ uint64_t zigzag(uint64_t v) { return v; }
static inline __device__ uint64_t zigzag(int64_t v)
{
int64_t s = (v < 0) ? 1 : 0;
return ((v ^ -s) * 2) + s;
}
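// Zigzag maps signed values onto unsigned ones so that small magnitudes stay small:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...; the unsigned overloads pass values through.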
static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; }
static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; }
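// E.g. CountLeadingBytes32(0x00001234) == 2 and CountLeadingBytes64(1) == 7; the RLE encoders
// derive minimal big-endian byte widths as 4 - CountLeadingBytes32(v) or
// 8 - CountLeadingBytes64(v), clamped so at least one byte is used.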
/**
* @brief Raw data output
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at
*streams[cid]+strm_pos[cid])
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] count number of bytes to encode
* @param[in] t thread id
*/
template <StreamIndexType cid, uint32_t inmask>
static __device__ void StoreBytes(
orcenc_state_s *s, const uint8_t *inbuf, uint32_t inpos, uint32_t count, int t)
{
uint8_t *dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
while (count > 0) {
uint32_t n = min(count, 512);
if (t < n) { dst[t] = inbuf[(inpos + t) & inmask]; }
dst += n;
inpos += n;
count -= n;
}
__syncthreads();
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
}
/**
* @brief ByteRLE encoder
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at
*streams[cid]+strm_pos[cid])
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
*
* @return number of input values encoded
*/
template <StreamIndexType cid, uint32_t inmask>
static __device__ uint32_t ByteRLE(
orcenc_state_s *s, const uint8_t *inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t)
{
uint8_t *dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
while (numvals > 0) {
uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
uint32_t rpt_map = ballot(t + 1 < numvals && v0 == v1), literal_run, repeat_run,
maxvals = min(numvals, 512);
if (!(t & 0x1f)) s->u.byterle.rpt_map[t >> 5] = rpt_map;
__syncthreads();
if (t == 0) {
// Find the start of an identical 3-byte sequence
// TBD: The two loops below could be eliminated using more ballot+ffs using warp0
literal_run = 0;
repeat_run = 0;
while (literal_run < maxvals) {
uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1];
uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1);
if (mask) {
uint32_t literal_run_ofs = __ffs(mask) - 1;
literal_run += literal_run_ofs;
repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1));
if (repeat_run + literal_run_ofs == 32) {
while (next == ~0) {
uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1;
next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0;
repeat_run += 32;
}
repeat_run += __ffs(~next) - 1;
}
repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals));
if (repeat_run < 3) {
literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0;
repeat_run = 0;
}
break;
}
rpt_map = next;
literal_run += 32;
}
if (repeat_run >= 130) {
// Limit large runs to multiples of 130
repeat_run = (repeat_run >= 3 * 130) ? 3 * 130 : (repeat_run >= 2 * 130) ? 2 * 130 : 130;
} else if (literal_run && literal_run + repeat_run == maxvals) {
repeat_run = 0; // Try again at next iteration
}
s->u.byterle.repeat_run = repeat_run;
s->u.byterle.literal_run = min(literal_run, maxvals);
}
__syncthreads();
literal_run = s->u.byterle.literal_run;
if (!flush && literal_run == numvals) {
literal_run &= ~0x7f;
if (!literal_run) break;
}
if (literal_run > 0) {
uint32_t num_runs = (literal_run + 0x7f) >> 7;
if (t < literal_run) {
uint32_t run_id = t >> 7;
uint32_t run = min(literal_run - run_id * 128, 128);
if (!(t & 0x7f)) dst[run_id + t] = 0x100 - run;
dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += num_runs + literal_run;
out_cnt += literal_run;
numvals -= literal_run;
inpos += literal_run;
}
repeat_run = s->u.byterle.repeat_run;
if (repeat_run > 0) {
while (repeat_run >= 130) {
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = 0x7f;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += 130;
numvals -= 130;
inpos += 130;
repeat_run -= 130;
}
if (!flush && repeat_run == numvals) {
// Wait for more data in case we can continue the run later
break;
}
if (repeat_run >= 3) {
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = repeat_run - 3;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += repeat_run;
numvals -= repeat_run;
inpos += repeat_run;
}
}
}
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
__syncthreads();
return out_cnt;
}
/**
* @brief Maps the symbol size in bytes to RLEv2 5-bit length code
*/
static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] = {
0, 7, 15, 23, 27, 28, 29, 30, 31};
/**
* @brief Encode a varint value, return the number of bytes written
*/
static inline __device__ uint32_t StoreVarint(uint8_t *dst, uint64_t v)
{
uint32_t bytecnt = 0;
for (;;) {
uint32_t c = (uint32_t)(v & 0x7f);
v >>= 7u;
if (v == 0) {
dst[bytecnt++] = c;
break;
} else {
dst[bytecnt++] = c + 0x80;
}
}
return bytecnt;
}
template <class T>
static inline __device__ void StoreBytesBigEndian(uint8_t *dst, T v, uint32_t w)
{
for (uint32_t i = 0, b = w * 8; i < w; ++i) {
b -= 8;
dst[i] = static_cast<uint8_t>(v >> b);
}
}
// Combine and store bits for symbol widths less than 8
static inline __device__ void StoreBitsBigEndian(
uint8_t *dst, uint32_t v, uint32_t w, int num_vals, int t)
{
if (t <= (num_vals | 0x1f)) {
uint32_t mask;
if (w <= 1) {
v = (v << 1) | (shuffle_xor(v, 1) & 0x1);
v = (v << 2) | (shuffle_xor(v, 2) & 0x3);
v = (v << 4) | (shuffle_xor(v, 4) & 0xf);
mask = 0x7;
} else if (w <= 2) {
v = (v << 2) | (shuffle_xor(v, 1) & 0x3);
v = (v << 4) | (shuffle_xor(v, 2) & 0xf);
mask = 0x3;
} else // if (w <= 4)
{
v = (v << 4) | (shuffle_xor(v, 1) & 0xf);
mask = 0x1;
}
if (t < num_vals && !(t & mask)) { dst[(t * w) >> 3] = static_cast<uint8_t>(v); }
}
}
/**
* @brief Integer RLEv2 encoder
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at
*streams[cid]+strm_pos[cid])
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
* @param[in] temp_storage shared memory storage to perform block reduce
*
* @return number of input values encoded
*/
template <StreamIndexType cid,
class T,
bool is_signed,
uint32_t inmask,
int block_size,
typename Storage>
static __device__ uint32_t IntegerRLE(orcenc_state_s *s,
const T *inbuf,
uint32_t inpos,
uint32_t numvals,
uint32_t flush,
int t,
Storage &temp_storage)
{
using block_reduce = cub::BlockReduce<T, block_size>;
uint8_t *dst = s->stream.data_ptrs[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
__shared__ volatile uint64_t block_vmin;
while (numvals > 0) {
T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0;
uint32_t delta_map = ballot(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512),
literal_run, delta_run;
if (!(t & 0x1f)) s->u.intrle.delta_map[t >> 5] = delta_map;
__syncthreads();
if (!t) {
// Find the start of the next delta run (2 consecutive values with the same delta)
literal_run = delta_run = 0;
while (literal_run < maxvals) {
if (delta_map != 0) {
uint32_t literal_run_ofs = __ffs(delta_map) - 1;
literal_run += literal_run_ofs;
delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1));
if (literal_run_ofs + delta_run == 32) {
for (;;) {
uint32_t delta_idx = (literal_run + delta_run) >> 5;
delta_map = (delta_idx < 512 / 32) ? s->u.intrle.delta_map[delta_idx] : 0;
if (delta_map != ~0) break;
delta_run += 32;
}
delta_run += __ffs(~delta_map) - 1;
}
delta_run += 2;
break;
}
literal_run += 32;
delta_map = s->u.intrle.delta_map[(literal_run >> 5)];
}
literal_run = min(literal_run, maxvals);
s->u.intrle.literal_run = literal_run;
s->u.intrle.delta_run = min(delta_run, maxvals - literal_run);
}
__syncthreads();
literal_run = s->u.intrle.literal_run;
// Find minimum and maximum values
if (literal_run > 0) {
// Find min & max
T vmin = (t < literal_run) ? v0 : std::numeric_limits<T>::max();
T vmax = (t < literal_run) ? v0 : std::numeric_limits<T>::min();
uint32_t literal_mode, literal_w;
vmin = block_reduce(temp_storage).Reduce(vmin, cub::Min());
__syncthreads();
vmax = block_reduce(temp_storage).Reduce(vmax, cub::Max());
if (t == 0) {
uint32_t mode1_w, mode2_w;
typename std::make_unsigned<T>::type vrange_mode1, vrange_mode2;
block_vmin = static_cast<uint64_t>(vmin);
if (sizeof(T) > 4) {
vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7);
mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7);
} else {
vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3);
mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3);
}
// Decide between mode1 & mode2 (also mode3 for length=2 repeat)
if (vrange_mode2 == 0 && mode1_w > 1) {
// Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >=
// 3)
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
bytecnt += StoreVarint(dst + 2, vrange_mode1);
dst[bytecnt++] = 0; // Zero delta
s->u.intrle.literal_mode = 3;
s->u.intrle.literal_w = bytecnt;
} else {
uint32_t range, w;
if (mode1_w > mode2_w && (literal_run - 1) * (mode1_w - mode2_w) > 4) {
s->u.intrle.literal_mode = 2;
w = mode2_w;
range = (uint32_t)vrange_mode2;
} else {
s->u.intrle.literal_mode = 1;
w = mode1_w;
range = (uint32_t)vrange_mode1;
}
if (w == 1)
w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 2 : 1;
else
w <<= 3; // bytes -> bits
s->u.intrle.literal_w = w;
}
}
__syncthreads();
vmin = static_cast<T>(block_vmin);
literal_mode = s->u.intrle.literal_mode;
literal_w = s->u.intrle.literal_w;
if (literal_mode == 1) {
// Direct mode
if (!t) {
dst[0] = 0x40 +
((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 +
((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
}
dst += 2;
typename std::make_unsigned<T>::type zzv0 = v0;
if (t < literal_run) { zzv0 = zigzag(v0); }
if (literal_w < 8) {
StoreBitsBigEndian(dst, zzv0, literal_w, literal_run, t);
} else if (t < literal_run) {
StoreBytesBigEndian(dst + t * (literal_w >> 3), zzv0, (literal_w >> 3));
}
} else if (literal_mode == 2) {
// Patched base mode
if (!t) {
uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1;
vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin;
bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7))
: (4 - min(CountLeadingBytes32(vmax << bv_scale), 3));
if (zero_pll_war) {
// Insert a dummy zero patch
pll = 1;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0;
} else {
pll = 0;
}
dst[0] = 0x80 +
((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 +
((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw];
dst[3] = ((pgw - 1) << 5) | pll;
if (is_signed) {
vmax >>= 1;
vmax |= vmin & ((T)1 << (bw * 8 - 1));
}
StoreBytesBigEndian(dst + 4, vmax, bw);
s->u.intrle.hdr_bytes = 4 + bw;
s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3;
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
v0 -= (t < literal_run) ? vmin : 0;
if (literal_w < 8)
StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t);
else if (t < literal_run)
StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3));
dst += s->u.intrle.pl_bytes;
} else {
// Delta mode
dst += literal_w;
literal_w = 0;
}
dst += (literal_run * literal_w + 7) >> 3;
numvals -= literal_run;
inpos += literal_run;
out_cnt += literal_run;
__syncthreads();
}
delta_run = s->u.intrle.delta_run;
if (delta_run > 0) {
if (t == literal_run) {
int64_t delta = (int64_t)v1 - (int64_t)v0;
uint64_t delta_base = zigzag(v0);
if (delta == 0 && delta_run >= 3 && delta_run <= 10) {
// Short repeat
uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7);
dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3);
for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++) {
b -= 8;
dst[1 + i] = static_cast<uint8_t>(delta_base >> b);
}
s->u.intrle.hdr_bytes = 1 + delta_bw;
} else {
// Delta
uint64_t delta_u = zigzag(delta);
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((delta_run - 1) >> 8);
dst[1] = (delta_run - 1) & 0xff;
bytecnt += StoreVarint(dst + bytecnt, delta_base);
bytecnt += StoreVarint(dst + bytecnt, delta_u);
s->u.intrle.hdr_bytes = bytecnt;
}
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
numvals -= delta_run;
inpos += delta_run;
out_cnt += delta_run;
}
}
if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); }
__syncthreads();
return out_cnt;
}
/**
* @brief Store a group of strings as a single concatenated string
*
* @param[in] dst destination buffer
* @param[in] strenc string encoder state
 * @param[in] len string length (per thread)
* @param[in] t thread id
*/
static __device__ void StoreStringData(uint8_t *dst,
strdata_enc_state_s *strenc,
uint32_t len,
int t)
{
// Start with summing up all the lengths
uint32_t pos = len;
uint32_t wt = t & 0x1f;
for (uint32_t n = 1; n < 32; n <<= 1) {
uint32_t tmp = shuffle(pos, (wt & ~n) | (n - 1));
pos += (wt & n) ? tmp : 0;
}
if (wt == 0x1f) { strenc->lengths_red[t >> 5] = pos; }
dst += pos - len;
__syncthreads();
if (t < 32) {
uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0;
uint32_t wpos = wlen;
for (uint32_t n = 1; n < 16; n <<= 1) {
uint32_t tmp = shuffle(wpos, (wt & ~n) | (n - 1));
wpos += (wt & n) ? tmp : 0;
}
if (wt < 16) { strenc->lengths_red[wt] = wpos - wlen; }
if (wt == 0xf) {
strenc->char_count = wpos; // Update stream position
}
}
__syncthreads();
  // TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive characters at a time
  // rather than have each thread do a memcpy
if (len > 0) { memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len); }
}
/**
* @brief In-place conversion from lengths to positions
*
* @param[in] vals input values
* @param[in] numvals number of values
* @param[in] t thread id
*/
template <class T>
inline __device__ void lengths_to_positions(volatile T *vals, uint32_t numvals, unsigned int t)
{
for (uint32_t n = 1; n < numvals; n <<= 1) {
__syncthreads();
if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)];
}
}
/**
* @brief Timestamp scale table (powers of 10)
*/
static const __device__ __constant__ int32_t kTimeScale[10] = {
1000000000, 100000000, 10000000, 1000000, 100000, 10000, 1000, 100, 10, 1};
/**
* @brief Encode column data
*
* @param[in] chunks encoder chunks device array [column][rowgroup]
 * @param[in,out] streams chunk streams device array [column][rowgroup]
*/
// blockDim {512,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuEncodeOrcColumnData(device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) orcenc_state_s state_g;
__shared__ union {
typename cub::BlockReduce<int32_t, block_size>::TempStorage i32;
typename cub::BlockReduce<int64_t, block_size>::TempStorage i64;
typename cub::BlockReduce<uint32_t, block_size>::TempStorage u32;
typename cub::BlockReduce<uint64_t, block_size>::TempStorage u64;
} temp_storage;
orcenc_state_s *const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
int t = threadIdx.x;
if (t == 0) {
s->chunk = chunks[col_id][group_id];
s->stream = streams[col_id][group_id];
}
if (t < CI_NUM_STREAMS) { s->strm_pos[t] = 0; }
__syncthreads();
if (!t) {
s->cur_row = 0;
s->present_rows = 0;
s->present_out = 0;
s->numvals = 0;
s->numlengths = 0;
s->nnz = 0;
// Dictionary data is encoded in a separate kernel
if (s->chunk.encoding_kind == DICTIONARY_V2) {
s->strm_pos[CI_DATA2] = s->stream.lengths[CI_DATA2];
s->strm_pos[CI_DICTIONARY] = s->stream.lengths[CI_DICTIONARY];
}
}
__syncthreads();
while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0) {
// Encode valid map
if (s->present_rows < s->chunk.num_rows) {
uint32_t present_rows = s->present_rows;
uint32_t nrows = min(s->chunk.num_rows - present_rows,
512 * 8 - (present_rows - (min(s->cur_row, s->present_out) & ~7)));
uint32_t nrows_out;
if (t * 8 < nrows) {
uint32_t row = s->chunk.start_row + present_rows + t * 8;
uint8_t valid = 0;
if (row < s->chunk.leaf_column->size()) {
if (s->chunk.leaf_column->nullable()) {
size_type current_valid_offset = row + s->chunk.leaf_column->offset();
size_type next_valid_offset =
current_valid_offset + min(32, s->chunk.leaf_column->size());
bitmask_type mask = cudf::detail::get_mask_offset_word(
s->chunk.leaf_column->null_mask(), 0, current_valid_offset, next_valid_offset);
valid = 0xff & mask;
} else {
valid = 0xff;
}
if (row + 7 > s->chunk.leaf_column->size()) {
valid = valid & ((1 << (s->chunk.leaf_column->size() & 7)) - 1);
}
}
s->valid_buf[(row >> 3) & 0x1ff] = valid;
}
__syncthreads();
present_rows += nrows;
if (!t) { s->present_rows = present_rows; }
// RLE encode the present stream
nrows_out =
present_rows -
s->present_out; // Should always be a multiple of 8 except at the end of the last row group
if (nrows_out > ((present_rows < s->chunk.num_rows) ? 130 * 8 : 0)) {
uint32_t present_out = s->present_out;
if (s->stream.ids[CI_PRESENT] >= 0) {
uint32_t flush = (present_rows < s->chunk.num_rows) ? 0 : 7;
nrows_out = (nrows_out + flush) >> 3;
nrows_out =
ByteRLE<CI_PRESENT, 0x1ff>(
s, s->valid_buf, (s->chunk.start_row + present_out) >> 3, nrows_out, flush, t) *
8;
}
__syncthreads();
if (!t) { s->present_out = min(present_out + nrows_out, present_rows); }
}
__syncthreads();
}
// Fetch non-null values
if (!s->stream.data_ptrs[CI_DATA]) {
// Pass-through
__syncthreads();
if (!t) {
s->cur_row = s->present_rows;
s->strm_pos[CI_DATA] = s->cur_row * s->chunk.dtype_len;
}
__syncthreads();
} else if (s->cur_row < s->present_rows) {
uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024;
uint32_t nrows =
min(min(s->present_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)), 512);
uint32_t row = s->chunk.start_row + s->cur_row + t;
uint32_t valid = (t < nrows) ? (s->valid_buf[(row >> 3) & 0x1ff] >> (row & 7)) & 1 : 0;
s->buf.u32[t] = valid;
// TODO: Could use a faster reduction relying on _popc() for the initial phase
lengths_to_positions(s->buf.u32, 512, t);
__syncthreads();
if (valid) {
int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1);
switch (s->chunk.type_kind) {
case INT:
case DATE:
case FLOAT: s->vals.u32[nz_idx] = s->chunk.leaf_column->element<uint32_t>(row); break;
case DOUBLE:
case LONG: s->vals.u64[nz_idx] = s->chunk.leaf_column->element<uint64_t>(row); break;
case SHORT: s->vals.u32[nz_idx] = s->chunk.leaf_column->element<uint16_t>(row); break;
case BOOLEAN:
case BYTE: s->vals.u8[nz_idx] = s->chunk.leaf_column->element<uint8_t>(row); break;
case TIMESTAMP: {
int64_t ts = s->chunk.leaf_column->element<int64_t>(row);
int32_t ts_scale = kTimeScale[min(s->chunk.scale, 9)];
int64_t seconds = ts / ts_scale;
int64_t nanos = (ts - seconds * ts_scale);
// There is a bug in the ORC spec such that for negative timestamps, it is understood
// between the writer and reader that nanos will be adjusted to their positive component
// but the negative seconds will be left alone. This means that -2.6 is encoded as
// seconds = -2 and nanos = 1+(-0.6) = 0.4
// This leads to an error in decoding time where -1 < time (s) < 0
// Details: https://github.com/rapidsai/cudf/pull/5529#issuecomment-648768925
if (nanos < 0) { nanos += ts_scale; }
s->vals.i64[nz_idx] = seconds - kORCTimeToUTC;
if (nanos != 0) {
// Trailing zeroes are encoded in the lower 3-bits
uint32_t zeroes = 0;
nanos *= kTimeScale[9 - min(s->chunk.scale, 9)];
if (!(nanos % 100)) {
nanos /= 100;
zeroes = 1;
while (zeroes < 7 && !(nanos % 10)) {
nanos /= 10;
zeroes++;
}
}
nanos = (nanos << 3) + zeroes;
}
s->lengths.u64[nz_idx] = nanos;
break;
}
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
uint32_t dict_idx = s->chunk.dict_index[row];
if (dict_idx > 0x7fffffffu) dict_idx = s->chunk.dict_index[dict_idx & 0x7fffffffu];
s->vals.u32[nz_idx] = dict_idx;
} else {
string_view value = s->chunk.leaf_column->element<string_view>(row);
s->u.strenc.str_data[s->buf.u32[t] - 1] = value.data();
s->lengths.u32[nz_idx] = value.size_bytes();
}
break;
default: break;
}
}
__syncthreads();
if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2) {
// Store string data
uint32_t nz = s->buf.u32[511];
uint32_t nz_idx = (s->nnz + t) & 0x3ff;
uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0;
StoreStringData(s->stream.data_ptrs[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t);
if (!t) { s->strm_pos[CI_DATA] += s->u.strenc.char_count; }
__syncthreads();
} else if (s->chunk.type_kind == BOOLEAN) {
// bool8 -> 8x bool1
uint32_t nz = s->buf.u32[511];
uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3;
if (t < n) {
uint32_t idx8 = (s->nnz & ~7) + (t << 3);
s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7) |
((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6) |
((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5) |
((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4) |
((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3) |
((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2) |
((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1) |
((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0);
}
__syncthreads();
}
if (!t) {
uint32_t nz = s->buf.u32[511];
s->nnz += nz;
s->numvals += nz;
s->numlengths += (s->chunk.type_kind == TIMESTAMP ||
(s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2))
? nz
: 0;
s->cur_row += nrows;
}
__syncthreads();
// Encode values
if (s->numvals > 0) {
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 7 : 0, n;
switch (s->chunk.type_kind) {
case SHORT:
case INT:
case DATE:
n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff, block_size>(
s, s->vals.i32, s->nnz - s->numvals, s->numvals, flush, t, temp_storage.i32);
break;
case LONG:
case TIMESTAMP:
n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff, block_size>(
s, s->vals.i64, s->nnz - s->numvals, s->numvals, flush, t, temp_storage.i64);
break;
case BYTE:
n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t);
break;
case BOOLEAN:
n = ByteRLE<CI_DATA, 0x1ff>(s,
s->lengths.u8,
(s->nnz - s->numvals + flush) >> 3,
(s->numvals + flush) >> 3,
flush,
t) *
8;
break;
case FLOAT:
StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t);
n = s->numvals;
break;
case DOUBLE:
StoreBytes<CI_DATA, 0x1fff>(
s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t);
n = s->numvals;
break;
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff, block_size>(
s, s->vals.u32, s->nnz - s->numvals, s->numvals, flush, t, temp_storage.u32);
} else {
n = s->numvals;
}
break;
default: n = s->numvals; break;
}
__syncthreads();
if (!t) { s->numvals -= min(n, s->numvals); }
}
// Encode secondary stream values
if (s->numlengths > 0) {
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 1 : 0, n;
switch (s->chunk.type_kind) {
case TIMESTAMP:
n = IntegerRLE<CI_DATA2, uint64_t, false, 0x3ff, block_size>(
s, s->lengths.u64, s->nnz - s->numlengths, s->numlengths, flush, t, temp_storage.u64);
break;
case STRING:
n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>(
s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, flush, t, temp_storage.u32);
break;
default: n = s->numlengths; break;
}
__syncthreads();
if (!t) { s->numlengths -= min(n, s->numlengths); }
}
}
__syncthreads();
}
__syncthreads();
if (t <= CI_PRESENT && s->stream.ids[t] >= 0) {
// Update actual compressed length
streams[col_id][group_id].lengths[t] = s->strm_pos[t];
if (!s->stream.data_ptrs[t]) {
streams[col_id][group_id].data_ptrs[t] =
static_cast<uint8_t *>(const_cast<void *>(s->chunk.leaf_column->head())) +
(s->chunk.leaf_column->offset() + s->chunk.start_row) * s->chunk.dtype_len;
}
}
}
/**
* @brief Encode column dictionaries
*
* @param[in] stripes Stripe dictionaries device array [stripe][string_column]
* @param[in] chunks EncChunk device array [rowgroup][column]
 * @param[in] streams Encoder chunk streams device array [column][rowgroup]
*/
// blockDim {512,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuEncodeStringDictionaries(StripeDictionary *stripes,
device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) orcenc_state_s state_g;
__shared__ typename cub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage;
orcenc_state_s *const s = &state_g;
uint32_t stripe_id = blockIdx.x;
uint32_t cid = (blockIdx.y) ? CI_DICTIONARY : CI_DATA2;
int t = threadIdx.x;
if (t == 0) s->u.dict_stripe = stripes[stripe_id];
__syncthreads();
auto const strm_ptr = &streams[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk];
if (t == 0) {
s->chunk = chunks[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk];
s->stream = *strm_ptr;
s->strm_pos[cid] = 0;
s->numlengths = 0;
s->nrows = s->u.dict_stripe.num_strings;
s->cur_row = 0;
}
column_device_view *string_column = s->u.dict_stripe.leaf_column;
auto const dict_data = s->u.dict_stripe.dict_data;
__syncthreads();
if (s->chunk.encoding_kind != DICTIONARY_V2) {
return; // This column isn't using dictionary encoding -> bail out
}
while (s->cur_row < s->nrows || s->numlengths != 0) {
uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512));
uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0;
if (cid == CI_DICTIONARY) {
// Encoding string contents
const char *ptr = 0;
uint32_t count = 0;
if (t < numvals) {
auto string_val = string_column->element<string_view>(string_idx);
ptr = string_val.data();
count = string_val.size_bytes();
}
s->u.strenc.str_data[t] = ptr;
StoreStringData(s->stream.data_ptrs[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY],
&s->u.strenc,
(ptr) ? count : 0,
t);
if (!t) { s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count; }
} else {
// Encoding string lengths
uint32_t count =
(t < numvals)
? static_cast<uint32_t>(string_column->element<string_view>(string_idx).size_bytes())
: 0;
uint32_t nz_idx = (s->cur_row + t) & 0x3ff;
if (t < numvals) s->lengths.u32[nz_idx] = count;
__syncthreads();
if (s->numlengths + numvals > 0) {
uint32_t flush = (s->cur_row + numvals == s->nrows) ? 1 : 0;
uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>(
s, s->lengths.u32, s->cur_row, s->numlengths + numvals, flush, t, temp_storage);
__syncthreads();
if (!t) {
s->numlengths += numvals;
s->numlengths -= min(n, s->numlengths);
}
}
}
if (t == 0) { s->cur_row += numvals; }
__syncthreads();
}
if (t == 0) { strm_ptr->lengths[cid] = s->strm_pos[cid]; }
}
__global__ void __launch_bounds__(512)
gpu_set_chunk_columns(const table_device_view view, device_2dspan<EncChunk> chunks)
{
// Set leaf_column member of EncChunk
for (size_type i = threadIdx.x; i < chunks.size().second; i += blockDim.x) {
chunks[blockIdx.x][i].leaf_column = view.begin() + blockIdx.x;
}
}
/**
* @brief Merge chunked column data into a single contiguous stream
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
 * @param[in] streams Encoder chunk streams device array [column][rowgroup]
*/
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactOrcDataStreams(device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> streams)
{
__shared__ __align__(16) StripeStream ss;
__shared__ __align__(16) encoder_chunk_streams strm0;
__shared__ uint8_t *volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
if (t == 0) {
ss = strm_desc[stripe_id][stream_id];
strm0 = streams[ss.column_id][ss.first_chunk_id];
}
__syncthreads();
auto const cid = ss.stream_type;
auto dst_ptr = strm0.data_ptrs[cid] + strm0.lengths[cid];
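  // Append each subsequent chunk's stream data directly after the previous chunk's data so the
  // stripe ends up with one contiguous stream.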
for (auto group = ss.first_chunk_id + 1; group < ss.first_chunk_id + ss.num_chunks; ++group) {
uint8_t *src_ptr;
uint32_t len;
if (t == 0) {
src_ptr = streams[ss.column_id][group].data_ptrs[cid];
len = streams[ss.column_id][group].lengths[cid];
if (src_ptr != dst_ptr) { streams[ss.column_id][group].data_ptrs[cid] = dst_ptr; }
ck_curptr_g = src_ptr;
ck_curlen_g = len;
}
__syncthreads();
src_ptr = ck_curptr_g;
len = ck_curlen_g;
if (len > 0 && src_ptr != dst_ptr) {
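      // Block-strided copy: each pass reads before the barrier and writes after it, so
      // overlapping source/destination ranges are moved safely.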
for (uint32_t i = 0; i < len; i += 1024) {
uint8_t v = (i + t < len) ? src_ptr[i + t] : 0;
__syncthreads();
if (i + t < len) { dst_ptr[i + t] = v; }
}
}
dst_ptr += len;
__syncthreads();
}
if (!t) { strm_desc[stripe_id][stream_id].stream_size = dst_ptr - strm0.data_ptrs[cid]; }
}
/**
* @brief Initializes compression input/output structures
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
 * @param[in] streams Encoder chunk streams device array [column][rowgroup]
* @param[out] comp_in Per-block compression input parameters
* @param[out] comp_out Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
*/
// blockDim {256,1,1}
__global__ void __launch_bounds__(256)
gpuInitCompressionBlocks(device_2dspan<StripeStream const> strm_desc,
device_2dspan<encoder_chunk_streams> streams, // const?
gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out,
uint8_t *compressed_bfr,
uint32_t comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ uint8_t *volatile uncomp_base_g;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
uint32_t num_blocks;
uint8_t *src, *dst;
if (t == 0) {
ss = strm_desc[stripe_id][stream_id];
uncomp_base_g = streams[ss.column_id][ss.first_chunk_id].data_ptrs[ss.stream_type];
}
__syncthreads();
src = uncomp_base_g;
dst = compressed_bfr + ss.bfr_offset;
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 1;
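  // num_blocks = ceil(stream_size / comp_blk_size); an empty stream still gets one block entry.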
for (uint32_t b = t; b < num_blocks; b += 256) {
gpu_inflate_input_s *blk_in = &comp_in[ss.first_block + b];
gpu_inflate_status_s *blk_out = &comp_out[ss.first_block + b];
uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
blk_in->srcDevice = src + b * comp_blk_size;
blk_in->srcSize = blk_size;
blk_in->dstDevice = dst + b * (3 + comp_blk_size) + 3; // reserve 3 bytes for block header
blk_in->dstSize = blk_size;
blk_out->bytes_written = blk_size;
blk_out->status = 1;
blk_out->reserved = 0;
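    // status is pre-set to non-zero ("not compressed") so the compaction pass falls back to
    // copying the uncompressed block if compression is skipped or fails for this block.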
}
}
/**
 * @brief Compacts compressed blocks into a single contiguous stream and updates the 3-byte
 * block length fields
*
* @param[in,out] strm_desc StripeStream device array [stripe][stream]
* @param[in] comp_in Per-block compression input parameters
* @param[in] comp_out Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
*/
// blockDim {1024,1,1}
__global__ void __launch_bounds__(1024)
gpuCompactCompressedBlocks(device_2dspan<StripeStream> strm_desc,
gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out,
uint8_t *compressed_bfr,
uint32_t comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ const uint8_t *volatile comp_src_g;
__shared__ uint32_t volatile comp_len_g;
auto const stripe_id = blockIdx.x;
auto const stream_id = blockIdx.y;
uint32_t t = threadIdx.x;
uint32_t num_blocks, b, blk_size;
const uint8_t *src;
uint8_t *dst;
if (t == 0) ss = strm_desc[stripe_id][stream_id];
__syncthreads();
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 0;
dst = compressed_bfr + ss.bfr_offset;
b = 0;
do {
if (t == 0) {
gpu_inflate_input_s *blk_in = &comp_in[ss.first_block + b];
gpu_inflate_status_s *blk_out = &comp_out[ss.first_block + b];
uint32_t src_len =
min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
uint32_t dst_len = (blk_out->status == 0) ? blk_out->bytes_written : src_len;
uint32_t blk_size24;
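      // ORC block header: 3 little-endian bytes holding (length << 1) | is_original,
      // where is_original = 1 marks a block stored uncompressed.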
if (dst_len >= src_len) {
// Copy from uncompressed source
src = static_cast<const uint8_t *>(blk_in->srcDevice);
blk_out->bytes_written = src_len;
dst_len = src_len;
blk_size24 = dst_len * 2 + 1;
} else {
// Compressed block
src = static_cast<const uint8_t *>(blk_in->dstDevice);
blk_size24 = dst_len * 2 + 0;
}
dst[0] = static_cast<uint8_t>(blk_size24 >> 0);
dst[1] = static_cast<uint8_t>(blk_size24 >> 8);
dst[2] = static_cast<uint8_t>(blk_size24 >> 16);
comp_src_g = src;
comp_len_g = dst_len;
}
__syncthreads();
src = comp_src_g;
blk_size = comp_len_g;
dst += 3; // skip over length written by thread0
if (src != dst) {
for (uint32_t i = 0; i < blk_size; i += 1024) {
uint8_t v = (i + t < blk_size) ? src[i + t] : 0;
__syncthreads();
if (i + t < blk_size) { dst[i + t] = v; }
}
}
dst += blk_size;
__syncthreads();
} while (++b < num_blocks);
// Update stripe stream with the compressed size
if (t == 0) {
strm_desc[stripe_id][stream_id].stream_size =
static_cast<uint32_t>(dst - (compressed_bfr + ss.bfr_offset));
}
}
void EncodeOrcColumnData(device_2dspan<EncChunk const> chunks,
device_2dspan<encoder_chunk_streams> streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(512, 1); // 512 threads per chunk
dim3 dim_grid(chunks.size().first, chunks.size().second);
gpuEncodeOrcColumnData<512><<<dim_grid, dim_block, 0, stream.value()>>>(chunks, streams);
}
void EncodeStripeDictionaries(StripeDictionary *stripes,
device_2dspan<EncChunk const> chunks,
uint32_t num_string_columns,
uint32_t num_stripes,
device_2dspan<encoder_chunk_streams> enc_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(512, 1); // 512 threads per dictionary
dim3 dim_grid(num_string_columns * num_stripes, 2);
gpuEncodeStringDictionaries<512>
<<<dim_grid, dim_block, 0, stream.value()>>>(stripes, chunks, enc_streams);
}
void set_chunk_columns(const table_device_view &view,
device_2dspan<EncChunk> chunks,
rmm::cuda_stream_view stream)
{
dim3 dim_block(512, 1);
dim3 dim_grid(chunks.size().first, 1);
gpu_set_chunk_columns<<<dim_grid, dim_block, 0, stream.value()>>>(view, chunks);
}
void CompactOrcDataStreams(device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> enc_streams,
rmm::cuda_stream_view stream)
{
dim3 dim_block(1024, 1);
dim3 dim_grid(strm_desc.size().first, strm_desc.size().second);
gpuCompactOrcDataStreams<<<dim_grid, dim_block, 0, stream.value()>>>(strm_desc, enc_streams);
}
void CompressOrcDataStreams(uint8_t *compressed_data,
uint32_t num_compressed_blocks,
CompressionKind compression,
uint32_t comp_blk_size,
device_2dspan<StripeStream> strm_desc,
device_2dspan<encoder_chunk_streams> enc_streams,
gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out,
rmm::cuda_stream_view stream)
{
dim3 dim_block_init(256, 1);
dim3 dim_grid(strm_desc.size().first, strm_desc.size().second);
gpuInitCompressionBlocks<<<dim_grid, dim_block_init, 0, stream.value()>>>(
strm_desc, enc_streams, comp_in, comp_out, compressed_data, comp_blk_size);
if (compression == SNAPPY) { gpu_snap(comp_in, comp_out, num_compressed_blocks, stream); }
dim3 dim_block_compact(1024, 1);
gpuCompactCompressedBlocks<<<dim_grid, dim_block_compact, 0, stream.value()>>>(
strm_desc, comp_in, comp_out, compressed_data, comp_blk_size);
}
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
4ee2d72ae2166d7cd6d1a24ebae5bb088f8c13c0.hip
|
// !!! This is a file automatically generated by hipify!!!
/* SorensonPar.cu
Parallel Implementation of Algorithm 4.1
as discussed in Sorenson and Parberry's
1994 paper "Two Fast Parallel Prime Number
Sieves".
Authors:
Daniel Anzaldo
David Frank
Antonio Lanfranchi
*/
// Visual Studio Dependencies (Can be commented out)
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// C dependencies
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
using namespace std;
// C++ dependencies
#include <algorithm>
typedef unsigned long long big;
// GLOBAL VARIABLES--------------------------------------
typedef struct Wheel_t // Struct-of-Arrays Wheel
{
bool * rp; // Numbers relatively prime to m
	big * dist;	// dist[x] = d s.t. x + d is the smallest integer >= x that is relatively prime to m
} Wheel_k;
bool * S; // Global shared bit array of numbers up to N
int P; // Global number of processors
bool check_cuda_status = false; // turn to false when running on circe
/* These are for tracking time */
struct timezone myTimezone;
struct timeval startTime, endTime;
// HOST FUNCTION HEADERS---------------------------------
/* gcd
Host version of the Euclidean Method
*/
__host__ big gcd(big u, big v);
/* EratosthenesSieve / EratosthenesSieveNaive
	HELPER: for Algorithm 4.1 Sequential Portion
	The most basic form of generating primes.
	EratosthenesSieve (defined below) also returns the k-th prime;
	EratosthenesSieveNaive simply marks the primes up to n in the global array S.
*/
void EratosthenesSieveNaive(unsigned long n);
/* Algorithm 4.1 Sequential Portion
Running Time: O(sqrt(n))
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
*/
hipError_t algorithm4_1(big n);
/* Algorithm 4.1 Helper: Parallel Sieve
All CUDA-related functionality goes here.
This code will change for different kernel versions.
*/
hipError_t parallelSieve(
big n, big k, big m, const Wheel_k &wheel, big range);
/* Frees the memory allocated on the device and returns any errors*/
hipError_t cleanup(bool *d_S, Wheel_k &wheel, hipError_t cudaStatus);
/* Set a checkpoint and show the total running time in seconds */
double report_running_time(const char *arr);
// DEVICE MATH FUNCTIONS---------------------------------
/* gcd_d
Device version of the Euclidean Method
find number c such that: a = sc, b = tc
*/
__device__ big gcd_d(big a, big b)
{
big tmp;
while (b!=0)
{
tmp = a;
a = b;
b = tmp%b;
}
return a;
}
/* gcd_d
Device version of the Binary Method
with bit arithmetic
*/
/*
__device__ big gcd_d(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* sqrt_d
Device version of the Square Root Function
Babylonian Method
*/
__device__ big sqrt_d(big a)
{
big root = a/2;
#pragma unroll
for (big n = 0; n < 10; n++)
{
root = 0.5 * (root + (a/root));
}
return root;
}
__device__ big min_d(big a, big b)
{
return (a < b) ? a : b;
}
__device__ big max_d(big a, big b)
{
return (a > b) ? a : b;
}
// ALGORITHM 4.1 KERNEL VERSIONS-------------------------
/* Algorithm 4.1: Parallel Sieve Kernel version 1
Parallelization: O(sqrt(n)) processors
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
PRAM Mode: Exclusive Read, Exclusive Write (EREW)
Remarks: No optimizations yet performed.
For n = 1 billion, it uses 31623 threads
*/
__global__ void parallelSieveKernel(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S)
{
big sqrt_N = sqrt_d(n);
// Express the sieve in thread mode.
big i = threadIdx.x + blockIdx.x * blockDim.x;
// Threads beyond n will not do work.
if (i <= n)
{
big L = range * i + 1;
big R = min_d(range * (i + 1), n);
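		// Each thread sieves its own subrange [L, R] of the global sieve.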
/* Range Sieving */
for (big x = L; x < R; x++)
d_S[x] = d_wheel.rp[x % m];
/* For every prime from prime[k] up to sqrt(N) */
for (big q = k; q < sqrt_N; q++)
{
if (d_S[q])
{
/* Compute smallest f s.t.
gcd_d(qf, m) == 1,
qf >= max_d(L, q^2) */
big f = max_d(q - 1, (big)((L / q) - 1));
/* f = f + W_k[f mod m].dist */
f += d_wheel.dist[f % m];
/* Remove the multiples of current prime */
while ((q * f) <= R)
{
// EREW Precaution. May need to be atomic operation.
if (!(d_S[q * f])) d_S[q * f] = false;
f += d_wheel.dist[f % m];
}
}
}
}
}
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 2
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel2(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S)
{
// Shared memory use for S in range of thread
extern __shared__ bool shared_range[];
// Initialize to True
if (threadIdx.x == 0)
{
for (int j = 0; j < range; j++)
{
shared_range[j] = true;
}
}
//__syncthreads();
big sqrt_N = sqrt_d(n);
// Express the sieve in thread mode.
big i = threadIdx.x + blockIdx.x * blockDim.x;
// Threads beyond n will not do work.
if (i <= n)
{
big L = range * i + 1;
big R = min_d(range * (i + 1), n);
/* Range Sieving */
for (big x = L; x < R; x++)
shared_range[x - L] = d_wheel.rp[x % m];
/* For every prime from prime[k] up to sqrt(N) */
for (big q = k; q < sqrt_N; q++)
{
if (d_S[q])
{
/* Compute smallest f s.t.
gcd_d(qf, m) == 1,
qf >= max_d(L, q^2) */
big f = max_d(q - 1, (big)((L / q) - 1));
/* f = f + W_k[f mod m].dist */
f += d_wheel.dist[f % m];
/* Remove the multiples of current prime */
while ((q * f) <= R)
{
// EREW Precaution. May need to be atomic operation.
if (!(shared_range[(q * f) - L])) shared_range[(q * f) - L] = false;
f += d_wheel.dist[f % m];
}
}
}
//__syncthreads();
/* Copy shared into global */
if (threadIdx.x == 0)
{
for (int j = 0; j < range; j++)
{
d_S[j + L] = shared_range[j];
}
}
}
}
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 3
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Probable use of the shared memory
Probable use of registers
Beware that register is only 4B or 32b.
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel3(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* MAIN
To run this add the ff. args:
1. N = the number up to which you're sieving
*/
int main(int argc, char **argv)
{
big N = (big)strtoul(argv[1], NULL, 10);
S = new bool[N]; //(bool*)malloc(N * sizeof(bool));
printf("Find primes up to: %llu\n\n", N);
/* start counting time */
gettimeofday(&startTime, &myTimezone);
EratosthenesSieveNaive(N);
//hipError_t x = algorithm4_1(N);
/* check the total running time */
report_running_time("Algorithm 4.1 CPU");
/*if (check_cuda_status)
{
if (x != hipSuccess) {
printf("Algorithm 4.1 failed to execute!");
return 1;
}
}*/
// Display the primes.
	for (big i = 0; i < N; i++)
		if (S[i]) printf("%llu ", i);
printf("Found stuff\n");
delete[] S;
return 0;
}
// HOST FUNCTION DEFINITIONS-----------------------------
// Euclidean Method
__host__ big gcd(big u, big v)
{
big tmp;
while (v != 0)
{
tmp = u;
u = v;
v = tmp%v;
}
return u;
}
// Binary Method
/*
__host__ big gcd(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
big EratosthenesSieve(long double k, big n)
{
big kthPrime = 0;
// 0 and 1 are non-primes.
S[0] = S[1] = false;
for (big i = 2; i < n; i++)
S[i] = true;
// Simple Sieving Operation.
for (big i = 2; i < (big)sqrtl(n); i++)
if (S[i])
{
			for (big j = i * i; j < n; j += i)
S[j] = false;
}
// Find the k-th prime.
for (big i = k; i > 2; i--)
if (S[i]) kthPrime = i;
return kthPrime;
}
void EratosthenesSieveNaive(unsigned long n)
{
// 0 and 1 are non-primes.
S[0] = S[1] = false;
for (big i = 2; i < n; i++)
S[i] = true;
// Simple Sieving Operation.
for (big i = 2; i < n; i++)
if (S[i])
{
			for (big j = i * i; j < n; j += i)
S[j] = false;
}
}
hipError_t algorithm4_1(big n)
{
/* VARIABLES */
big range;
big sqrt_N = (big)sqrtl((long double)n);
Wheel_k wheel;
/* Allocation of wheel */
wheel.rp = new bool[n];
wheel.dist = new big[n];
/* Find the first k primes
K = maximal s.t. S[K] <= (log N) / 4
Find primes up to sqrt(N) */
big k = EratosthenesSieve(log10l((long double)n) / 4, n);
/* Find the product of the first k primes m */
big m = 1;
for (big ii = 0; ii < k; ii++)
if (S[ii]) m *= ii;
/* Compute k-th wheel W_k
FUTURE OPTIMIZATION: Delegate kernel for computation */
for (big x = 0; x < n; x++)
{
// True if rp[x] is relatively prime to m
wheel.rp[x] = (gcd(x, m) == 1);
		/* This is d s.t. x + d is
		the smallest integer >= x
		relatively prime to m */
int d = 0;
while (gcd(x + d, m) != 1)
d++;
wheel.dist[x] = d;
}
/* Delta = ceil(n/p) */
range = (big)ceill(n / (long double)P);
/* PARALLEL PART */
hipError_t parallelStatus = parallelSieve(n, k, m, wheel, range);
if (check_cuda_status)
{
if (parallelStatus != hipSuccess) {
fprintf(stderr, "parallelSieve() failed!");
}
}
/* FREE */
delete[] wheel.rp;
delete[] wheel.dist;
return parallelStatus;
}
hipError_t parallelSieve(
big n, big k, big m, const Wheel_k &wheel, big range)
{
hipError_t cudaStatus;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
/* The Number Field S
will be migrated to GLOBAL memory
OPTIMIZATION: ranges will be migrated to SHARED memory
OPTIMIZATION: [0, sqrt(n)] will be migrated to CONSTANT memory
*/
bool * d_S = NULL;
// The Wheel Precomputed Table
// will be migrated to GLOBAL memory
// OPTIMIZATION: may be migrated to CONSTANT memory as well
Wheel_k d_wheel;
d_wheel.rp = NULL;
d_wheel.dist = NULL;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
return cudaStatus;
}
}
// Measure start time for CUDA portion
hipEventRecord(start, 0);
// CUDA Memory Allocations.
cudaStatus = hipMalloc((void**)&d_S, n * sizeof(bool));
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed on number field S!\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = hipMalloc((void**)&(d_wheel.rp), n * sizeof(bool));
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed on wheel.rp!\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = hipMalloc((void**)&(d_wheel.dist), n * sizeof(big));
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed on wheel.dist!\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
// cudaMemCpy -> Device
cudaStatus = hipMemcpy(d_S, S, n * sizeof(bool), hipMemcpyHostToDevice);
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed! S->d_S.\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = hipMemcpy(d_wheel.rp, wheel.rp, n * sizeof(bool), hipMemcpyHostToDevice);
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed! wheel.rp->d_wheel.rp\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = hipMemcpy(d_wheel.dist, wheel.dist, n * sizeof(big), hipMemcpyHostToDevice);
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed! wheel.dist->d_wheel.dist\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
// Kernel Call
dim3 gridSize(ceill(ceill(sqrt(n))/256), 1, 1);
dim3 blockSize(256, 1, 1);
//parallelSieveKernel<<<gridSize, blockSize>>>(n, k, m, wheel, range, d_S);
//parallelSieveKernel2<<<gridSize, blockSize, range>>>(n, k, m, wheel, range, d_S);
cudaStatus = hipGetLastError();
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "parallelSieveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = hipDeviceSynchronize();
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
return cleanup(d_S, d_wheel, cudaStatus);
}
}
// cudaMemCpy -> Host
cudaStatus = hipMemcpy(S, d_S, n * sizeof(bool), hipMemcpyDeviceToHost);
if (check_cuda_status)
{
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed! d_S->S.\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
// Measure stop time for CUDA portion
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Time to generate: %0.5f ms\n", elapsedTime);
// hipFree
return cleanup(d_S, d_wheel, cudaStatus);
}
hipError_t cleanup(bool *d_S, Wheel_k &wheel, hipError_t cudaStatus)
{
hipFree(d_S);
hipFree(wheel.rp);
hipFree(wheel.dist);
return cudaStatus;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time(const char *arr) {
long sec_diff, usec_diff;
gettimeofday(&endTime, &myTimezone);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for %s: %ld.%06ld sec\n\n", arr, sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
|
4ee2d72ae2166d7cd6d1a24ebae5bb088f8c13c0.cu
|
/* SorensonPar.cu
Parallel Implementation of Algorithm 4.1
as discussed in Sorenson and Parberry's
1994 paper "Two Fast Parallel Prime Number
Sieves".
Authors:
Daniel Anzaldo
David Frank
Antonio Lanfranchi
*/
// Visual Studio Dependencies (Can be commented out)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// C dependencies
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
using namespace std;
// C++ dependencies
#include <algorithm>
typedef unsigned long long big;
// GLOBAL VARIABLES--------------------------------------
typedef struct Wheel_t // Struct-of-Arrays Wheel
{
bool * rp; // Numbers relatively prime to m
	big * dist;	// dist[x] = d s.t. x + d is the smallest integer >= x that is relatively prime to m
} Wheel_k;
bool * S; // Global shared bit array of numbers up to N
int P; // Global number of processors
bool check_cuda_status = false; // turn to false when running on circe
/* These are for tracking time */
struct timezone myTimezone;
struct timeval startTime, endTime;
// HOST FUNCTION HEADERS---------------------------------
/* gcd
Host version of the Euclidean Method
*/
__host__ big gcd(big u, big v);
/* EratosthenesSieve / EratosthenesSieveNaive
	HELPER: for Algorithm 4.1 Sequential Portion
	The most basic form of generating primes.
	EratosthenesSieve (defined below) also returns the k-th prime;
	EratosthenesSieveNaive simply marks the primes up to n in the global array S.
*/
void EratosthenesSieveNaive(unsigned long n);
/* Algorithm 4.1 Sequential Portion
Running Time: O(sqrt(n))
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
*/
cudaError_t algorithm4_1(big n);
/* Algorithm 4.1 Helper: Parallel Sieve
All CUDA-related functionality goes here.
This code will change for different kernel versions.
*/
cudaError_t parallelSieve(
big n, big k, big m, const Wheel_k &wheel, big range);
/* Frees the memory allocated on the device and returns any errors*/
cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus);
/* Set a checkpoint and show the total running time in seconds */
double report_running_time(const char *arr);
// DEVICE MATH FUNCTIONS---------------------------------
/* gcd_d
Device version of the Euclidean Method
find number c such that: a = sc, b = tc
*/
__device__ big gcd_d(big a, big b)
{
big tmp;
while (b!=0)
{
tmp = a;
a = b;
b = tmp%b;
}
return a;
}
/* gcd_d
Device version of the Binary Method
with bit arithmetic
*/
/*
__device__ big gcd_d(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* sqrt_d
Device version of the Square Root Function
Babylonian Method
*/
__device__ big sqrt_d(big a)
{
big root = a/2;
#pragma unroll
for (big n = 0; n < 10; n++)
{
root = 0.5 * (root + (a/root));
}
return root;
}
__device__ big min_d(big a, big b)
{
return (a < b) ? a : b;
}
__device__ big max_d(big a, big b)
{
return (a > b) ? a : b;
}
// ALGORITHM 4.1 KERNEL VERSIONS-------------------------
/* Algorithm 4.1: Parallel Sieve Kernel version 1
Parallelization: O(sqrt(n)) processors
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
PRAM Mode: Exclusive Read, Exclusive Write (EREW)
Remarks: No optimizations yet performed.
For n = 1 billion, it uses 31623 threads
*/
__global__ void parallelSieveKernel(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S)
{
big sqrt_N = sqrt_d(n);
// Express the sieve in thread mode.
big i = threadIdx.x + blockIdx.x * blockDim.x;
// Threads beyond n will not do work.
if (i <= n)
{
big L = range * i + 1;
big R = min_d(range * (i + 1), n);
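		// Each thread sieves its own subrange [L, R] of the global sieve.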
/* Range Sieving */
for (big x = L; x < R; x++)
d_S[x] = d_wheel.rp[x % m];
/* For every prime from prime[k] up to sqrt(N) */
for (big q = k; q < sqrt_N; q++)
{
if (d_S[q])
{
/* Compute smallest f s.t.
gcd_d(qf, m) == 1,
qf >= max_d(L, q^2) */
big f = max_d(q - 1, (big)((L / q) - 1));
/* f = f + W_k[f mod m].dist */
f += d_wheel.dist[f % m];
/* Remove the multiples of current prime */
while ((q * f) <= R)
{
// EREW Precaution. May need to be atomic operation.
if (!(d_S[q * f])) d_S[q * f] = false;
f += d_wheel.dist[f % m];
}
}
}
}
}
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 2
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel2(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S)
{
// Shared memory use for S in range of thread
extern __shared__ bool shared_range[];
// Initialize to True
if (threadIdx.x == 0)
{
for (int j = 0; j < range; j++)
{
shared_range[j] = true;
}
}
//__syncthreads();
big sqrt_N = sqrt_d(n);
// Express the sieve in thread mode.
big i = threadIdx.x + blockIdx.x * blockDim.x;
// Threads beyond n will not do work.
if (i <= n)
{
big L = range * i + 1;
big R = min_d(range * (i + 1), n);
/* Range Sieving */
for (big x = L; x < R; x++)
shared_range[x - L] = d_wheel.rp[x % m];
/* For every prime from prime[k] up to sqrt(N) */
for (big q = k; q < sqrt_N; q++)
{
if (d_S[q])
{
/* Compute smallest f s.t.
gcd_d(qf, m) == 1,
qf >= max_d(L, q^2) */
big f = max_d(q - 1, (big)((L / q) - 1));
/* f = f + W_k[f mod m].dist */
f += d_wheel.dist[f % m];
/* Remove the multiples of current prime */
while ((q * f) <= R)
{
// EREW Precaution. May need to be atomic operation.
if (!(shared_range[(q * f) - L])) shared_range[(q * f) - L] = false;
f += d_wheel.dist[f % m];
}
}
}
//__syncthreads();
/* Copy shared into global */
if (threadIdx.x == 0)
{
for (int j = 0; j < range; j++)
{
d_S[j + L] = shared_range[j];
}
}
}
}
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 3
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Probable use of the shared memory
Probable use of registers
Beware that register is only 4B or 32b.
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel3(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* MAIN
To run this add the ff. args:
1. N = the number up to which you're sieving
*/
int main(int argc, char **argv)
{
big N = (big)strtoul(argv[1], NULL, 10);
S = new bool[N]; //(bool*)malloc(N * sizeof(bool));
printf("Find primes up to: %llu\n\n", N);
/* start counting time */
gettimeofday(&startTime, &myTimezone);
EratosthenesSieveNaive(N);
//cudaError_t x = algorithm4_1(N);
/* check the total running time */
report_running_time("Algorithm 4.1 CPU");
/*if (check_cuda_status)
{
if (x != cudaSuccess) {
printf("Algorithm 4.1 failed to execute!");
return 1;
}
}*/
// Display the primes.
	for (big i = 0; i < N; i++)
		if (S[i]) printf("%llu ", i);
printf("Found stuff\n");
delete[] S;
return 0;
}
// HOST FUNCTION DEFINITIONS-----------------------------
// Euclidean Method
__host__ big gcd(big u, big v)
{
big tmp;
while (v != 0)
{
tmp = u;
u = v;
v = tmp%v;
}
return u;
}
// Binary Method
/*
__host__ big gcd(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
big EratosthenesSieve(long double k, big n)
{
big kthPrime = 0;
// 0 and 1 are non-primes.
S[0] = S[1] = false;
for (big i = 2; i < n; i++)
S[i] = true;
// Simple Sieving Operation.
for (big i = 2; i < (big)sqrtl(n); i++)
if (S[i])
{
			for (big j = i * i; j < n; j += i)
S[j] = false;
}
// Find the k-th prime.
for (big i = k; i > 2; i--)
if (S[i]) kthPrime = i;
return kthPrime;
}
void EratosthenesSieveNaive(unsigned long n)
{
// 0 and 1 are non-primes.
S[0] = S[1] = false;
for (big i = 2; i < n; i++)
S[i] = true;
// Simple Sieving Operation.
for (big i = 2; i < n; i++)
if (S[i])
{
			for (big j = i * i; j < n; j += i)
S[j] = false;
}
}
cudaError_t algorithm4_1(big n)
{
/* VARIABLES */
big range;
big sqrt_N = (big)sqrtl((long double)n);
Wheel_k wheel;
/* Allocation of wheel */
wheel.rp = new bool[n];
wheel.dist = new big[n];
/* Find the first k primes
K = maximal s.t. S[K] <= (log N) / 4
Find primes up to sqrt(N) */
big k = EratosthenesSieve(log10l((long double)n) / 4, n);
/* Find the product of the first k primes m */
big m = 1;
for (big ii = 0; ii < k; ii++)
if (S[ii]) m *= ii;
/* Compute k-th wheel W_k
FUTURE OPTIMIZATION: Delegate kernel for computation */
for (big x = 0; x < n; x++)
{
// True if rp[x] is relatively prime to m
wheel.rp[x] = (gcd(x, m) == 1);
		/* This is d s.t. x + d is
		the smallest integer >= x
		relatively prime to m */
int d = 0;
while (gcd(x + d, m) != 1)
d++;
wheel.dist[x] = d;
}
/* Delta = ceil(n/p) */
range = (big)ceill(n / (long double)P);
/* PARALLEL PART */
cudaError_t parallelStatus = parallelSieve(n, k, m, wheel, range);
if (check_cuda_status)
{
if (parallelStatus != cudaSuccess) {
fprintf(stderr, "parallelSieve() failed!");
}
}
/* FREE */
delete[] wheel.rp;
delete[] wheel.dist;
return parallelStatus;
}
cudaError_t parallelSieve(
big n, big k, big m, const Wheel_k &wheel, big range)
{
cudaError_t cudaStatus;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* The Number Field S
will be migrated to GLOBAL memory
OPTIMIZATION: ranges will be migrated to SHARED memory
OPTIMIZATION: [0, sqrt(n)] will be migrated to CONSTANT memory
*/
bool * d_S = NULL;
// The Wheel Precomputed Table
// will be migrated to GLOBAL memory
// OPTIMIZATION: may be migrated to CONSTANT memory as well
Wheel_k d_wheel;
d_wheel.rp = NULL;
d_wheel.dist = NULL;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
return cudaStatus;
}
}
// Measure start time for CUDA portion
cudaEventRecord(start, 0);
// CUDA Memory Allocations.
cudaStatus = cudaMalloc((void**)&d_S, n * sizeof(bool));
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed on number field S!\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = cudaMalloc((void**)&(d_wheel.rp), n * sizeof(bool));
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed on wheel.rp!\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = cudaMalloc((void**)&(d_wheel.dist), n * sizeof(big));
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed on wheel.dist!\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
// cudaMemCpy -> Device
cudaStatus = cudaMemcpy(d_S, S, n * sizeof(bool), cudaMemcpyHostToDevice);
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed! S->d_S.\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = cudaMemcpy(d_wheel.rp, wheel.rp, n * sizeof(bool), cudaMemcpyHostToDevice);
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed! wheel.rp->d_wheel.rp\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = cudaMemcpy(d_wheel.dist, wheel.dist, n * sizeof(big), cudaMemcpyHostToDevice);
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed! wheel.dist->d_wheel.dist\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
// Kernel Call
dim3 gridSize(ceill(ceill(sqrt(n))/256), 1, 1);
dim3 blockSize(256, 1, 1);
//parallelSieveKernel<<<gridSize, blockSize>>>(n, k, m, wheel, range, d_S);
//parallelSieveKernel2<<<gridSize, blockSize, range>>>(n, k, m, wheel, range, d_S);
cudaStatus = cudaGetLastError();
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "parallelSieveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return cleanup(d_S, d_wheel, cudaStatus);
}
}
cudaStatus = cudaDeviceSynchronize();
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
return cleanup(d_S, d_wheel, cudaStatus);
}
}
// cudaMemCpy -> Host
cudaStatus = cudaMemcpy(S, d_S, n * sizeof(bool), cudaMemcpyDeviceToHost);
if (check_cuda_status)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed! d_S->S.\n");
return cleanup(d_S, d_wheel, cudaStatus);
}
}
// Measure stop time for CUDA portion
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Time to generate: %0.5f ms\n", elapsedTime);
// cudaFree
return cleanup(d_S, d_wheel, cudaStatus);
}
cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus)
{
cudaFree(d_S);
cudaFree(wheel.rp);
cudaFree(wheel.dist);
return cudaStatus;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time(const char *arr) {
long sec_diff, usec_diff;
gettimeofday(&endTime, &myTimezone);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff= endTime.tv_usec-startTime.tv_usec;
if(usec_diff < 0) {
sec_diff --;
usec_diff += 1000000;
}
printf("Running time for %s: %ld.%06ld sec\n\n", arr, sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
|
69592040bb4a95b4080e8bc7c542c5ec4cf9fcf1.hip
|
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include <cub/device/device_merge_sort.cuh>
#include <nvbench_helper.cuh>
// %RANGE% TUNE_TRANSPOSE trp 0:1:1
// %RANGE% TUNE_LOAD ld 0:2:1
// %RANGE% TUNE_ITEMS_PER_THREAD ipt 7:24:1
// %RANGE% TUNE_THREADS_PER_BLOCK_POW2 tpb 6:10:1
#ifndef TUNE_BASE
#define TUNE_THREADS_PER_BLOCK (1 << TUNE_THREADS_PER_BLOCK_POW2)
#endif // TUNE_BASE
using value_t = cub::NullType;
#if !TUNE_BASE
#if TUNE_TRANSPOSE == 0
#define TUNE_LOAD_ALGORITHM cub::BLOCK_LOAD_DIRECT
#define TUNE_STORE_ALGORITHM cub::BLOCK_STORE_DIRECT
#else // TUNE_TRANSPOSE == 1
#define TUNE_LOAD_ALGORITHM cub::BLOCK_LOAD_WARP_TRANSPOSE
#define TUNE_STORE_ALGORITHM cub::BLOCK_STORE_WARP_TRANSPOSE
#endif // TUNE_TRANSPOSE
#if TUNE_LOAD == 0
#define TUNE_LOAD_MODIFIER cub::LOAD_DEFAULT
#elif TUNE_LOAD == 1
#define TUNE_LOAD_MODIFIER cub::LOAD_LDG
#else // TUNE_LOAD == 2
#define TUNE_LOAD_MODIFIER cub::LOAD_CA
#endif // TUNE_LOAD
template <typename KeyT>
struct policy_hub_t
{
struct policy_t : cub::ChainedPolicy<300, policy_t, policy_t>
{
using MergeSortPolicy =
cub::AgentMergeSortPolicy<TUNE_THREADS_PER_BLOCK,
cub::Nominal4BItemsToItems<KeyT>(TUNE_ITEMS_PER_THREAD),
TUNE_LOAD_ALGORITHM,
TUNE_LOAD_MODIFIER,
TUNE_STORE_ALGORITHM>;
};
using MaxPolicy = policy_t;
};
#endif // !TUNE_BASE
template <typename T, typename OffsetT>
void merge_sort_keys(nvbench::state &state, nvbench::type_list<T, OffsetT>)
{
using key_t = T;
using value_t = cub::NullType;
using key_input_it_t = key_t *;
using value_input_it_t = value_t *;
using key_it_t = key_t *;
using value_it_t = value_t *;
using offset_t = OffsetT;
using compare_op_t = less_t;
#if !TUNE_BASE
using policy_t = policy_hub_t<key_t>;
using dispatch_t = cub::DispatchMergeSort<key_input_it_t,
value_input_it_t,
key_it_t,
value_it_t,
offset_t,
compare_op_t,
policy_t>;
#else // TUNE_BASE
using dispatch_t = hipcub::
DispatchMergeSort<key_input_it_t, value_input_it_t, key_it_t, value_it_t, offset_t, compare_op_t>;
#endif // TUNE_BASE
// Retrieve axis parameters
const auto elements = static_cast<std::size_t>(state.get_int64("Elements{io}"));
const bit_entropy entropy = str_to_entropy(state.get_string("Entropy"));
thrust::device_vector<T> buffer_1(elements);
thrust::device_vector<T> buffer_2(elements);
gen(seed_t{}, buffer_1, entropy);
key_t *d_buffer_1 = thrust::raw_pointer_cast(buffer_1.data());
key_t *d_buffer_2 = thrust::raw_pointer_cast(buffer_2.data());
// Enable throughput calculations and add "Size" column to results.
state.add_element_count(elements);
state.add_global_memory_reads<T>(elements, "Size");
state.add_global_memory_writes<T>(elements);
// Allocate temporary storage:
std::size_t temp_size{};
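  // A first Dispatch call with a null temporary-storage pointer only computes the required
  // temp_size; no sorting is performed yet.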
dispatch_t::Dispatch(nullptr,
temp_size,
d_buffer_1,
nullptr,
d_buffer_2,
nullptr,
static_cast<offset_t>(elements),
compare_op_t{},
0 /* stream */);
thrust::device_vector<nvbench::uint8_t> temp(temp_size);
auto *temp_storage = thrust::raw_pointer_cast(temp.data());
report_entropy(buffer_1, entropy);
state.exec([&](nvbench::launch &launch) {
dispatch_t::Dispatch(temp_storage,
temp_size,
d_buffer_1,
nullptr,
d_buffer_2,
nullptr,
static_cast<offset_t>(elements),
compare_op_t{},
launch.get_stream());
});
}
NVBENCH_BENCH_TYPES(merge_sort_keys, NVBENCH_TYPE_AXES(all_types, offset_types))
.set_name("cub::DeviceMergeSort::SortKeys")
.set_type_axes_names({"T{ct}", "OffsetT{ct}"})
.add_int64_power_of_two_axis("Elements{io}", nvbench::range(16, 28, 4))
.add_string_axis("Entropy", {"1.000", "0.201"});
|
69592040bb4a95b4080e8bc7c542c5ec4cf9fcf1.cu
|
/******************************************************************************
* Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include <cub/device/device_merge_sort.cuh>
#include <nvbench_helper.cuh>
// %RANGE% TUNE_TRANSPOSE trp 0:1:1
// %RANGE% TUNE_LOAD ld 0:2:1
// %RANGE% TUNE_ITEMS_PER_THREAD ipt 7:24:1
// %RANGE% TUNE_THREADS_PER_BLOCK_POW2 tpb 6:10:1
#ifndef TUNE_BASE
#define TUNE_THREADS_PER_BLOCK (1 << TUNE_THREADS_PER_BLOCK_POW2)
#endif // TUNE_BASE
using value_t = cub::NullType;
#if !TUNE_BASE
#if TUNE_TRANSPOSE == 0
#define TUNE_LOAD_ALGORITHM cub::BLOCK_LOAD_DIRECT
#define TUNE_STORE_ALGORITHM cub::BLOCK_STORE_DIRECT
#else // TUNE_TRANSPOSE == 1
#define TUNE_LOAD_ALGORITHM cub::BLOCK_LOAD_WARP_TRANSPOSE
#define TUNE_STORE_ALGORITHM cub::BLOCK_STORE_WARP_TRANSPOSE
#endif // TUNE_TRANSPOSE
#if TUNE_LOAD == 0
#define TUNE_LOAD_MODIFIER cub::LOAD_DEFAULT
#elif TUNE_LOAD == 1
#define TUNE_LOAD_MODIFIER cub::LOAD_LDG
#else // TUNE_LOAD == 2
#define TUNE_LOAD_MODIFIER cub::LOAD_CA
#endif // TUNE_LOAD
template <typename KeyT>
struct policy_hub_t
{
struct policy_t : cub::ChainedPolicy<300, policy_t, policy_t>
{
using MergeSortPolicy =
cub::AgentMergeSortPolicy<TUNE_THREADS_PER_BLOCK,
cub::Nominal4BItemsToItems<KeyT>(TUNE_ITEMS_PER_THREAD),
TUNE_LOAD_ALGORITHM,
TUNE_LOAD_MODIFIER,
TUNE_STORE_ALGORITHM>;
};
using MaxPolicy = policy_t;
};
#endif // !TUNE_BASE
template <typename T, typename OffsetT>
void merge_sort_keys(nvbench::state &state, nvbench::type_list<T, OffsetT>)
{
using key_t = T;
using value_t = cub::NullType;
using key_input_it_t = key_t *;
using value_input_it_t = value_t *;
using key_it_t = key_t *;
using value_it_t = value_t *;
using offset_t = OffsetT;
using compare_op_t = less_t;
#if !TUNE_BASE
using policy_t = policy_hub_t<key_t>;
using dispatch_t = cub::DispatchMergeSort<key_input_it_t,
value_input_it_t,
key_it_t,
value_it_t,
offset_t,
compare_op_t,
policy_t>;
#else // TUNE_BASE
using dispatch_t = cub::
DispatchMergeSort<key_input_it_t, value_input_it_t, key_it_t, value_it_t, offset_t, compare_op_t>;
#endif // TUNE_BASE
// Retrieve axis parameters
const auto elements = static_cast<std::size_t>(state.get_int64("Elements{io}"));
const bit_entropy entropy = str_to_entropy(state.get_string("Entropy"));
thrust::device_vector<T> buffer_1(elements);
thrust::device_vector<T> buffer_2(elements);
gen(seed_t{}, buffer_1, entropy);
key_t *d_buffer_1 = thrust::raw_pointer_cast(buffer_1.data());
key_t *d_buffer_2 = thrust::raw_pointer_cast(buffer_2.data());
// Enable throughput calculations and add "Size" column to results.
state.add_element_count(elements);
state.add_global_memory_reads<T>(elements, "Size");
state.add_global_memory_writes<T>(elements);
// Allocate temporary storage:
std::size_t temp_size{};
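  // A first Dispatch call with a null temporary-storage pointer only computes the required
  // temp_size; no sorting is performed yet.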
dispatch_t::Dispatch(nullptr,
temp_size,
d_buffer_1,
nullptr,
d_buffer_2,
nullptr,
static_cast<offset_t>(elements),
compare_op_t{},
0 /* stream */);
thrust::device_vector<nvbench::uint8_t> temp(temp_size);
auto *temp_storage = thrust::raw_pointer_cast(temp.data());
report_entropy(buffer_1, entropy);
state.exec([&](nvbench::launch &launch) {
dispatch_t::Dispatch(temp_storage,
temp_size,
d_buffer_1,
nullptr,
d_buffer_2,
nullptr,
static_cast<offset_t>(elements),
compare_op_t{},
launch.get_stream());
});
}
NVBENCH_BENCH_TYPES(merge_sort_keys, NVBENCH_TYPE_AXES(all_types, offset_types))
.set_name("cub::DeviceMergeSort::SortKeys")
.set_type_axes_names({"T{ct}", "OffsetT{ct}"})
.add_int64_power_of_two_axis("Elements{io}", nvbench::range(16, 28, 4))
.add_string_axis("Entropy", {"1.000", "0.201"});
|
7aaf5cc22000c491ca58bf96b5bbf5e07eff63ad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <hip/hip_runtime.h>
int cpu_matrix_multiplication(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
clock_t start = clock();
for(int i = 0; i < a_n; i++)
{
for(int j = 0; j < b_m; j++)
{
*(matrix_c + i * b_m + j) = 0;
for(int k = 0; k < a_m; k++){
* (matrix_c + i * b_m + j) += (* (matrix_a + i * a_m + k)) * (* (matrix_b + k * b_m + j));
}
}
}
return (int)(clock() - start)/ (CLOCKS_PER_SEC / 1000);
}
__global__ void matrixMul(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
int temp = id;
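    // Grid-stride loop: each thread computes every (blockDim.x * gridDim.x)-th element of C.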
while(temp < a_n * b_m)
{
int ROW = temp / b_m;
int COL = temp % b_m;
*(matrix_c + ROW * b_m + COL) = 0;
for(int k = 0; k < a_m; k++){
* (matrix_c + ROW * b_m + COL) += (* (matrix_a + ROW * a_m + k)) * (* (matrix_b + k * b_m + COL));
}
temp +=blockDim.x*gridDim.x;
}
}
int gpu_matrix_multiplication(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
int * dev_matrix_a, * dev_matrix_b, * dev_matrix_c;
hipEvent_t start, stop;
float elapsed_time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMalloc(&dev_matrix_a, sizeof(int) * a_n * a_m);
hipMalloc(&dev_matrix_b, sizeof(int) * b_n * b_m);
hipMalloc(&dev_matrix_c, sizeof(int) * a_n * b_m);
hipMemcpy(dev_matrix_a, matrix_a, sizeof(int) * a_n * a_m, hipMemcpyHostToDevice);
hipMemcpy(dev_matrix_b, matrix_b, sizeof(int) * b_n * b_m, hipMemcpyHostToDevice);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( matrixMul), dim3(32),dim3(128), 0, 0, dev_matrix_a, dev_matrix_b, dev_matrix_c, a_n, b_n, a_m, b_m);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy(matrix_c, dev_matrix_c, sizeof(int) * a_n * b_m, hipMemcpyDeviceToHost);
hipFree(dev_matrix_a);
hipFree(dev_matrix_b);
hipFree(dev_matrix_c);
return floor(elapsed_time);
}
void allocate_matrix(int **matrix, int n, int m)
{
*matrix = (int*) malloc(sizeof(int) * n * m);
}
void generate_matrix(int *matrix, int n, int m)
{
for(int i = 0; i < n; i++){
for(int j = 0; j < m; j++){
*(matrix + i*m +j) = rand() % m;
}
}
}
void output_matrix(int *matrix, int n, int m)
{
for(int i = 0; i < n; i++){
printf("\n");
for(int j = 0; j < m; j++){
printf("%d ",*(matrix + i*m +j));
}
}
}
void compute(int a_row, int b_row, int a_col, int b_col, bool show_matrix_flag = false)
{
int * matrix_a, * matrix_b, * matrix_c_cpu, * matrix_c_gpu, time_cpu, time_gpu;
allocate_matrix(&matrix_a, a_row, a_col);
allocate_matrix(&matrix_b, b_row, b_col);
generate_matrix(matrix_a, a_row, a_col);
generate_matrix(matrix_b, b_row, b_col);
if(show_matrix_flag){
printf("\n\n :");
output_matrix(matrix_a, a_row, a_col);
printf("\n\n B:");
output_matrix(matrix_b, b_row, b_col);
}
allocate_matrix(&matrix_c_cpu, a_row, b_col);
//time_cpu = 0;
time_cpu = cpu_matrix_multiplication(matrix_a, matrix_b, matrix_c_cpu, a_row, b_row, a_col, b_col);
if(show_matrix_flag){
printf("\n\n C(CPU):");
output_matrix(matrix_c_cpu, a_row, b_col);
}
free(matrix_c_cpu);
allocate_matrix(&matrix_c_gpu, a_row, b_col);
time_gpu = gpu_matrix_multiplication(matrix_a, matrix_b, matrix_c_gpu, a_row, b_row, a_col, b_col);
if(show_matrix_flag){
printf("\n\n C(GPU):");
output_matrix(matrix_c_gpu, a_row, b_col);
}
free(matrix_c_gpu);
free(matrix_a);
free(matrix_b);
if(!show_matrix_flag){
printf("\n\n (ms) A[%d,%d] * B[%d, %d]:", a_row, a_col, b_row, b_col);
printf("CPU - %d, GPU - %d\n", time_cpu, time_gpu);
}
}
int main() {
srand(time(NULL));
compute(5,6,6,2,true);
compute(32,32,32,32);
compute(64,64,64,64);
compute(128,128,128,128);
compute(256,256,256,256);
compute(512,512,512,512);
compute(1024,1024,1024,1024);
//compute(2048,2048,2048,2048);
//compute(10000,10000,10000,10000);
return 0;
}
|
7aaf5cc22000c491ca58bf96b5bbf5e07eff63ad.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <cuda_runtime.h>
int cpu_matrix_multiplication(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
clock_t start = clock();
for(int i = 0; i < a_n; i++)
{
for(int j = 0; j < b_m; j++)
{
*(matrix_c + i * b_m + j) = 0;
for(int k = 0; k < a_m; k++){
* (matrix_c + i * b_m + j) += (* (matrix_a + i * a_m + k)) * (* (matrix_b + k * b_m + j));
}
}
}
return (int)(clock() - start)/ (CLOCKS_PER_SEC / 1000);
}
__global__ void matrixMul(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
int temp = id;
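    // Grid-stride loop: each thread computes every (blockDim.x * gridDim.x)-th element of C.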
while(temp < a_n * b_m)
{
int ROW = temp / b_m;
int COL = temp % b_m;
*(matrix_c + ROW * b_m + COL) = 0;
for(int k = 0; k < a_m; k++){
* (matrix_c + ROW * b_m + COL) += (* (matrix_a + ROW * a_m + k)) * (* (matrix_b + k * b_m + COL));
}
temp +=blockDim.x*gridDim.x;
}
}
int gpu_matrix_multiplication(int *matrix_a, int *matrix_b, int *matrix_c, int a_n, int b_n, int a_m, int b_m)
{
int * dev_matrix_a, * dev_matrix_b, * dev_matrix_c;
cudaEvent_t start, stop;
float elapsed_time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMalloc(&dev_matrix_a, sizeof(int) * a_n * a_m);
cudaMalloc(&dev_matrix_b, sizeof(int) * b_n * b_m);
cudaMalloc(&dev_matrix_c, sizeof(int) * a_n * b_m);
cudaMemcpy(dev_matrix_a, matrix_a, sizeof(int) * a_n * a_m, cudaMemcpyHostToDevice);
cudaMemcpy(dev_matrix_b, matrix_b, sizeof(int) * b_n * b_m, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
matrixMul<<<32,128>>>(dev_matrix_a, dev_matrix_b, dev_matrix_c, a_n, b_n, a_m, b_m);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(matrix_c, dev_matrix_c, sizeof(int) * a_n * b_m, cudaMemcpyDeviceToHost);
cudaFree(dev_matrix_a);
cudaFree(dev_matrix_b);
cudaFree(dev_matrix_c);
return floor(elapsed_time);
}
void allocate_matrix(int **matrix, int n, int m)
{
*matrix = (int*) malloc(sizeof(int) * n * m);
}
void generate_matrix(int *matrix, int n, int m)
{
for(int i = 0; i < n; i++){
for(int j = 0; j < m; j++){
*(matrix + i*m +j) = rand() % m;
}
}
}
void output_matrix(int *matrix, int n, int m)
{
for(int i = 0; i < n; i++){
printf("\n");
for(int j = 0; j < m; j++){
printf("%d ",*(matrix + i*m +j));
}
}
}
void compute(int a_row, int b_row, int a_col, int b_col, bool show_matrix_flag = false)
{
int * matrix_a, * matrix_b, * matrix_c_cpu, * matrix_c_gpu, time_cpu, time_gpu;
allocate_matrix(&matrix_a, a_row, a_col);
allocate_matrix(&matrix_b, b_row, b_col);
generate_matrix(matrix_a, a_row, a_col);
generate_matrix(matrix_b, b_row, b_col);
if(show_matrix_flag){
printf("\n\nМатрица А:");
output_matrix(matrix_a, a_row, a_col);
printf("\n\nМатрица B:");
output_matrix(matrix_b, b_row, b_col);
}
allocate_matrix(&matrix_c_cpu, a_row, b_col);
//time_cpu = 0;
time_cpu = cpu_matrix_multiplication(matrix_a, matrix_b, matrix_c_cpu, a_row, b_row, a_col, b_col);
if(show_matrix_flag){
printf("\n\nМатрица C(CPU):");
output_matrix(matrix_c_cpu, a_row, b_col);
}
free(matrix_c_cpu);
allocate_matrix(&matrix_c_gpu, a_row, b_col);
time_gpu = gpu_matrix_multiplication(matrix_a, matrix_b, matrix_c_gpu, a_row, b_row, a_col, b_col);
if(show_matrix_flag){
printf("\n\nМатрица C(GPU):");
output_matrix(matrix_c_gpu, a_row, b_col);
}
free(matrix_c_gpu);
free(matrix_a);
free(matrix_b);
if(!show_matrix_flag){
printf("\n\nВремя выполнения (ms) A[%d,%d] * B[%d, %d]:", a_row, a_col, b_row, b_col);
printf("CPU - %d, GPU - %d\n", time_cpu, time_gpu);
}
}
int main() {
srand(time(NULL));
compute(5,6,6,2,true);
compute(32,32,32,32);
compute(64,64,64,64);
compute(128,128,128,128);
compute(256,256,256,256);
compute(512,512,512,512);
compute(1024,1024,1024,1024);
//compute(2048,2048,2048,2048);
//compute(10000,10000,10000,10000);
return 0;
}
|
7b403e31ce23c98e00dba708345e5c8d167f7a45.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __CUDNN__
#include "SmoothedKLDivLoss.hpp"
template class SmoothedKLDivLoss<float>;
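// Decodes a flat 1D index into 5D (time, batch, channel, row, col) coordinates;
// idxDim arrives pre-loaded with the shape extents and is overwritten in place with the decoded indices.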
__forceinline__ __device__ int *GetTensorDimIndex(int index1D, int *idxDim, int capacityPerTime)
{
#pragma unroll
for(int i = 0; i < 4; i++) {
idxDim[i] = index1D/capacityPerTime;
index1D %= capacityPerTime;
capacityPerTime /= idxDim[i+1];
}
idxDim[4] = index1D;
return idxDim;
}
__forceinline__ __device__ unsigned int GetIdx(int *shapeDim, int ti, int ba, int ch, int ro, int co) {
return (((ti * (shapeDim)[1] + ba) * (shapeDim)[2] + ch) * (shapeDim)[3] + ro) * (shapeDim)[4] + co;
}
__forceinline__ __device__ unsigned int GetIdx(int *shapeDim, int *idxDim) {
return (((idxDim[0] * (shapeDim)[1] + idxDim[1]) * (shapeDim)[2] + idxDim[2]) * (shapeDim)[3] + idxDim[3]) * (shapeDim)[4] + idxDim[4];
}
__global__ void SmoothedKLDivLoss_Forward_Kernel(float *pDevLabel, float *pDevInput, float *pDevSmoothed, float *pDevOutput,
float smoothing, int vocabsize, int timeIndex,
int totalCapacity, int lossCapacity, int timesize, int batchsize, int channelsize, int rowsize, int colsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int lossDimIndex[5] = {timesize, batchsize, channelsize, rowsize, colsize};
GetTensorDimIndex(idx, lossDimIndex, totalCapacity);
int lossDim[5] = {1, 1, 1, 1, 1};
if (timeIndex == 0) {
lossDim[0] = timesize;
lossDim[1] = batchsize;
lossDimIndex[2] = lossDimIndex[3] = lossDimIndex[4] = 0;
}
else if (timeIndex == 2) {
lossDim[0] = timesize;
lossDim[1] = batchsize;
lossDim[2] = channelsize;
lossDimIndex[3] = lossDimIndex[4] = 0;
}
int lossIdx = GetIdx(lossDim, lossDimIndex);
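// Apply label smoothing to the target, then atomically accumulate this element's
// KL term, p_smooth * (log p_smooth - log q), into its shared reduction slot.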
if (idx < totalCapacity && lossIdx < lossCapacity) {
pDevSmoothed[idx] = (1.f - smoothing) *pDevLabel[idx] + (smoothing / (float)vocabsize);
atomicAdd(&pDevOutput[lossIdx], pDevSmoothed[idx] * (logf(pDevSmoothed[idx]) - logf(pDevInput[idx])));
}
}
template<typename DTYPE> Tensor<DTYPE> *SmoothedKLDivLoss<DTYPE>::ForwardPropagateOnGPU(int pTime) {
Tensor<DTYPE> *input = this->GetTensor();
Tensor<DTYPE> *label = this->GetLabel()->GetResult();
Tensor<DTYPE> *result = this->GetResult();
m_pDevLabel = label->GetGPUData(pTime);
m_pDevInput = input->GetGPUData(pTime);
m_pDevOutput = result->GetGPUData(pTime);
m_pDevSmoothedLabel = m_aSmoothedLabel->GetGPUData(pTime);
int timesize = input->GetTimeSize();
int batchsize = input->GetBatchSize();
int channelsize = input->GetChannelSize();
int rowsize = input->GetRowSize();
int colsize = input->GetColSize();
int totalCapacity = input->GetCapacity();
int lossCapacity = result->GetCapacity();
int threadsPerBlock = 128;
int noBlock = totalCapacity / threadsPerBlock + 1;
hipLaunchKernelGGL(( SmoothedKLDivLoss_Forward_Kernel) , dim3(noBlock), dim3(threadsPerBlock) , 0, 0, m_pDevLabel, m_pDevInput, m_pDevSmoothedLabel, m_pDevOutput,
m_Smoothing, m_VocabSize, m_timeIndex,
totalCapacity, lossCapacity, timesize, batchsize, channelsize, rowsize, colsize);
checkCudaErrors(hipDeviceSynchronize());
return result;
}
__global__ void SmoothedKLDivLoss_BackPropagate_kernel(float *pDevLabel, float *pDevInput, float *pDevInputDelta,
float smoothing, int vocabsize, int totalCapacity) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < totalCapacity) {
pDevInputDelta[idx] = -((1.f-smoothing) * pDevLabel[idx] + (smoothing/(float)vocabsize)) / pDevInput[idx];
}
}
template<typename DTYPE> Tensor<DTYPE> *SmoothedKLDivLoss<DTYPE>::BackPropagateOnGPU(int pTime) {
Tensor<DTYPE> *input = this->GetTensor();
Tensor<DTYPE> *label = this->GetLabel()->GetResult();
Tensor<DTYPE> *input_delta = this->GetOperator()->GetDelta();
m_pDevLabel = label->GetGPUData(pTime);
m_pDevInput = input->GetGPUData(pTime);
m_pDevInputDelta = input_delta->GetGPUData(pTime);
int totalCapacity = input->GetCapacity();
int threadsPerBlock = 128;
int noBlock = totalCapacity / threadsPerBlock + 1;
hipLaunchKernelGGL(( SmoothedKLDivLoss_BackPropagate_kernel) , dim3(noBlock), dim3(threadsPerBlock) , 0, 0, m_pDevLabel, m_pDevInput, m_pDevInputDelta,
m_Smoothing, m_VocabSize, totalCapacity);
return NULL;
}
#endif // ifdef __CUDNN__
|
7b403e31ce23c98e00dba708345e5c8d167f7a45.cu
|
#ifdef __CUDNN__
#include "SmoothedKLDivLoss.hpp"
template class SmoothedKLDivLoss<float>;
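// Decodes a flat 1D index into 5D (time, batch, channel, row, col) coordinates;
// idxDim arrives pre-loaded with the shape extents and is overwritten in place with the decoded indices.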
__forceinline__ __device__ int *GetTensorDimIndex(int index1D, int *idxDim, int capacityPerTime)
{
#pragma unroll
for(int i = 0; i < 4; i++) {
idxDim[i] = index1D/capacityPerTime;
index1D %= capacityPerTime;
capacityPerTime /= idxDim[i+1];
}
idxDim[4] = index1D;
return idxDim;
}
__forceinline__ __device__ unsigned int GetIdx(int *shapeDim, int ti, int ba, int ch, int ro, int co) {
return (((ti * (shapeDim)[1] + ba) * (shapeDim)[2] + ch) * (shapeDim)[3] + ro) * (shapeDim)[4] + co;
}
__forceinline__ __device__ unsigned int GetIdx(int *shapeDim, int *idxDim) {
return (((idxDim[0] * (shapeDim)[1] + idxDim[1]) * (shapeDim)[2] + idxDim[2]) * (shapeDim)[3] + idxDim[3]) * (shapeDim)[4] + idxDim[4];
}
__global__ void SmoothedKLDivLoss_Forward_Kernel(float *pDevLabel, float *pDevInput, float *pDevSmoothed, float *pDevOutput,
float smoothing, int vocabsize, int timeIndex,
int totalCapacity, int lossCapacity, int timesize, int batchsize, int channelsize, int rowsize, int colsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int lossDimIndex[5] = {timesize, batchsize, channelsize, rowsize, colsize};
GetTensorDimIndex(idx, lossDimIndex, totalCapacity);
int lossDim[5] = {1, 1, 1, 1, 1};
if (timeIndex == 0) {
lossDim[0] = timesize;
lossDim[1] = batchsize;
lossDimIndex[2] = lossDimIndex[3] = lossDimIndex[4] = 0;
}
else if (timeIndex == 2) {
lossDim[0] = timesize;
lossDim[1] = batchsize;
lossDim[2] = channelsize;
lossDimIndex[3] = lossDimIndex[4] = 0;
}
int lossIdx = GetIdx(lossDim, lossDimIndex);
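// Apply label smoothing to the target, then atomically accumulate this element's
// KL term, p_smooth * (log p_smooth - log q), into its shared reduction slot.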
if (idx < totalCapacity && lossIdx < lossCapacity) {
pDevSmoothed[idx] = (1.f - smoothing) *pDevLabel[idx] + (smoothing / (float)vocabsize);
atomicAdd(&pDevOutput[lossIdx], pDevSmoothed[idx] * (logf(pDevSmoothed[idx]) - logf(pDevInput[idx])));
}
}
template<typename DTYPE> Tensor<DTYPE> *SmoothedKLDivLoss<DTYPE>::ForwardPropagateOnGPU(int pTime) {
Tensor<DTYPE> *input = this->GetTensor();
Tensor<DTYPE> *label = this->GetLabel()->GetResult();
Tensor<DTYPE> *result = this->GetResult();
m_pDevLabel = label->GetGPUData(pTime);
m_pDevInput = input->GetGPUData(pTime);
m_pDevOutput = result->GetGPUData(pTime);
m_pDevSmoothedLabel = m_aSmoothedLabel->GetGPUData(pTime);
int timesize = input->GetTimeSize();
int batchsize = input->GetBatchSize();
int channelsize = input->GetChannelSize();
int rowsize = input->GetRowSize();
int colsize = input->GetColSize();
int totalCapacity = input->GetCapacity();
int lossCapacity = result->GetCapacity();
int threadsPerBlock = 128;
int noBlock = totalCapacity / threadsPerBlock + 1;
SmoothedKLDivLoss_Forward_Kernel <<< noBlock, threadsPerBlock >>> (m_pDevLabel, m_pDevInput, m_pDevSmoothedLabel, m_pDevOutput,
m_Smoothing, m_VocabSize, m_timeIndex,
totalCapacity, lossCapacity, timesize, batchsize, channelsize, rowsize, colsize);
checkCudaErrors(cudaDeviceSynchronize());
return result;
}
__global__ void SmoothedKLDivLoss_BackPropagate_kernel(float *pDevLabel, float *pDevInput, float *pDevInputDelta,
float smoothing, int vocabsize, int totalCapacity) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < totalCapacity) {
pDevInputDelta[idx] = -((1.f-smoothing) * pDevLabel[idx] + (smoothing/(float)vocabsize)) / pDevInput[idx];
}
}
template<typename DTYPE> Tensor<DTYPE> *SmoothedKLDivLoss<DTYPE>::BackPropagateOnGPU(int pTime) {
Tensor<DTYPE> *input = this->GetTensor();
Tensor<DTYPE> *label = this->GetLabel()->GetResult();
Tensor<DTYPE> *input_delta = this->GetOperator()->GetDelta();
m_pDevLabel = label->GetGPUData(pTime);
m_pDevInput = input->GetGPUData(pTime);
m_pDevInputDelta = input_delta->GetGPUData(pTime);
int totalCapacity = input->GetCapacity();
int threadsPerBlock = 128;
int noBlock = totalCapacity / threadsPerBlock + 1;
SmoothedKLDivLoss_BackPropagate_kernel <<< noBlock, threadsPerBlock >>> (m_pDevLabel, m_pDevInput, m_pDevInputDelta,
m_Smoothing, m_VocabSize, totalCapacity);
return NULL;
}
#endif // ifdef __CUDNN__
|
bf11c388c9b97ea7726d877bebbfc5eca692df0e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "cuenergy.h"
#include "half2_operator_overload.cuh"
#include "fast_math.cuh"
#include "hip/hip_fp16.h"
#include <stdio.h>
#if UNROLLX != 8
# error "UNROLLX must be 8"
#endif
#if BLOCKSIZEX != 16
# error "BLOCKSIZEX must be 16"
#endif
// Max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about.
// At 16 bytes per atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
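// (64 KB / 16 bytes per float4 = 4096 entries; the margin down to 4070 is slack
// left for constant memory consumed by the runtime and compiler.)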
__constant__ float4 atominfo[MAXATOMS];
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
+ threadIdx.x;
unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
+ xindex;
half2 coory = __float2half2_rn(gridspacing * yindex);
half2 coorx = __float2half2_rn(gridspacing * xindex);
half2 energyvalx1= __float2half2_rn(0.0f);
half2 energyvalx2= __float2half2_rn(0.0f);
half2 energyvalx3= __float2half2_rn(0.0f);
half2 energyvalx4= __float2half2_rn(0.0f);
half2 energyvalx5= __float2half2_rn(0.0f);
half2 energyvalx6= __float2half2_rn(0.0f);
half2 energyvalx7= __float2half2_rn(0.0f);
half2 energyvalx8= __float2half2_rn(0.0f);
half2 gridspacing_u = __float2half2_rn(gridspacing * BLOCKSIZEX);
int atomid;
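// Each iteration packs two consecutive atoms into half2 lanes and evaluates both at once;
// with this loop bound, an odd trailing atom would simply be skipped.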
for (atomid=0; atomid<numatoms/2; atomid++) {
half2 atom_info_y = __floats2half2_rn(atominfo[2*atomid].y, atominfo[2*atomid+1].y);
//float dy = coory - atominfo[atomid].y;
//float dyz2 = (dy * dy) + atominfo[atomid].z;
half2 atom_info_z = __floats2half2_rn(atominfo[2*atomid].z, atominfo[2*atomid+1].z);
half2 atom_info_x = __floats2half2_rn(atominfo[2*atomid].x, atominfo[2*atomid+1].x);
half2 atom_info_w = __floats2half2_rn(atominfo[2*atomid].w, atominfo[2*atomid+1].w);
half2 dy = coory - atom_info_y;
half2 dyz2 = (dy * dy) + atom_info_z;
//float dx1 = coorx - atominfo[atomid].x;
half2 dx1 = coorx - atom_info_x;
half2 dx2 = dx1 + gridspacing_u;
half2 dx3 = dx2 + gridspacing_u;
half2 dx4 = dx3 + gridspacing_u;
half2 dx5 = dx4 + gridspacing_u;
half2 dx6 = dx5 + gridspacing_u;
half2 dx7 = dx6 + gridspacing_u;
half2 dx8 = dx7 + gridspacing_u;
/*
energyvalx1 += atominfo[atomid].w * (1.0f / sqrtf(dx1*dx1 + dyz2));
energyvalx2 += atominfo[atomid].w * (1.0f / sqrtf(dx2*dx2 + dyz2));
energyvalx3 += atominfo[atomid].w * (1.0f / sqrtf(dx3*dx3 + dyz2));
energyvalx4 += atominfo[atomid].w * (1.0f / sqrtf(dx4*dx4 + dyz2));
energyvalx5 += atominfo[atomid].w * (1.0f / sqrtf(dx5*dx5 + dyz2));
energyvalx6 += atominfo[atomid].w * (1.0f / sqrtf(dx6*dx6 + dyz2));
energyvalx7 += atominfo[atomid].w * (1.0f / sqrtf(dx7*dx7 + dyz2));
energyvalx8 += atominfo[atomid].w * (1.0f / sqrtf(dx8*dx8 + dyz2));
*/
energyvalx1 += atom_info_w * (fast_h2rsqrt(dx1*dx1 + dyz2));
energyvalx2 += atom_info_w * (fast_h2rsqrt(dx2*dx2 + dyz2));
energyvalx3 += atom_info_w * (fast_h2rsqrt(dx3*dx3 + dyz2));
energyvalx4 += atom_info_w * (fast_h2rsqrt(dx4*dx4 + dyz2));
energyvalx5 += atom_info_w * (fast_h2rsqrt(dx5*dx5 + dyz2));
energyvalx6 += atom_info_w * (fast_h2rsqrt(dx6*dx6 + dyz2));
energyvalx7 += atom_info_w * (fast_h2rsqrt(dx7*dx7 + dyz2));
energyvalx8 += atom_info_w * (fast_h2rsqrt(dx8*dx8 + dyz2));
}
float energyvalx1_float = __half2float(energyvalx1.x + energyvalx1.y);
float energyvalx2_float = __half2float(energyvalx2.x + energyvalx2.y);
float energyvalx3_float = __half2float(energyvalx3.x + energyvalx3.y);
float energyvalx4_float = __half2float(energyvalx4.x + energyvalx4.y);
float energyvalx5_float = __half2float(energyvalx5.x + energyvalx5.y);
float energyvalx6_float = __half2float(energyvalx6.x + energyvalx6.y);
float energyvalx7_float = __half2float(energyvalx7.x + energyvalx7.y);
float energyvalx8_float = __half2float(energyvalx8.x + energyvalx8.y);
energygrid[outaddr] += energyvalx1_float;
energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2_float;
energygrid[outaddr+2*BLOCKSIZEX] += energyvalx3_float;
energygrid[outaddr+3*BLOCKSIZEX] += energyvalx4_float;
energygrid[outaddr+4*BLOCKSIZEX] += energyvalx5_float;
energygrid[outaddr+5*BLOCKSIZEX] += energyvalx6_float;
energygrid[outaddr+6*BLOCKSIZEX] += energyvalx7_float;
energygrid[outaddr+7*BLOCKSIZEX] += energyvalx8_float;
}
// This function copies atoms from the CPU to the GPU and
// precalculates (zplane - z)^2 for each atom.
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
if (count > MAXATOMS) {
printf("Atom count exceeds constant buffer storage capacity\n");
return -1;
}
float atompre[4*MAXATOMS];
int i;
for (i=0; i<count*4; i+=4) {
atompre[i ] = atoms[i ];
atompre[i + 1] = atoms[i + 1];
float dz = zplane - atoms[i + 2];
atompre[i + 2] = dz*dz;
atompre[i + 3] = atoms[i + 3];
}
hipMemcpyToSymbol(atominfo, atompre, count * 4 * sizeof(float), 0);
CUERR // check and clear any existing errors
return 0;
}
|
bf11c388c9b97ea7726d877bebbfc5eca692df0e.cu
|
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "cuenergy.h"
#include "half2_operator_overload.cuh"
#include "fast_math.cuh"
#include "cuda_fp16.h"
#include <stdio.h>
#if UNROLLX != 8
# error "UNROLLX must be 8"
#endif
#if BLOCKSIZEX != 16
# error "BLOCKSIZEX must be 16"
#endif
// Max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about.
// At 16 bytes per atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
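// (64 KB / 16 bytes per float4 = 4096 entries; the margin down to 4070 is slack
// left for constant memory consumed by the runtime and compiler.)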
__constant__ float4 atominfo[MAXATOMS];
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
+ threadIdx.x;
unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
+ xindex;
half2 coory = __float2half2_rn(gridspacing * yindex);
half2 coorx = __float2half2_rn(gridspacing * xindex);
half2 energyvalx1= __float2half2_rn(0.0f);
half2 energyvalx2= __float2half2_rn(0.0f);
half2 energyvalx3= __float2half2_rn(0.0f);
half2 energyvalx4= __float2half2_rn(0.0f);
half2 energyvalx5= __float2half2_rn(0.0f);
half2 energyvalx6= __float2half2_rn(0.0f);
half2 energyvalx7= __float2half2_rn(0.0f);
half2 energyvalx8= __float2half2_rn(0.0f);
half2 gridspacing_u = __float2half2_rn(gridspacing * BLOCKSIZEX);
int atomid;
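// Each iteration packs two consecutive atoms into half2 lanes and evaluates both at once;
// with this loop bound, an odd trailing atom would simply be skipped.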
for (atomid=0; atomid<numatoms/2; atomid++) {
half2 atom_info_y = __floats2half2_rn(atominfo[2*atomid].y, atominfo[2*atomid+1].y);
//float dy = coory - atominfo[atomid].y;
//float dyz2 = (dy * dy) + atominfo[atomid].z;
half2 atom_info_z = __floats2half2_rn(atominfo[2*atomid].z, atominfo[2*atomid+1].z);
half2 atom_info_x = __floats2half2_rn(atominfo[2*atomid].x, atominfo[2*atomid+1].x);
half2 atom_info_w = __floats2half2_rn(atominfo[2*atomid].w, atominfo[2*atomid+1].w);
half2 dy = coory - atom_info_y;
half2 dyz2 = (dy * dy) + atom_info_z;
//float dx1 = coorx - atominfo[atomid].x;
half2 dx1 = coorx - atom_info_x;
half2 dx2 = dx1 + gridspacing_u;
half2 dx3 = dx2 + gridspacing_u;
half2 dx4 = dx3 + gridspacing_u;
half2 dx5 = dx4 + gridspacing_u;
half2 dx6 = dx5 + gridspacing_u;
half2 dx7 = dx6 + gridspacing_u;
half2 dx8 = dx7 + gridspacing_u;
/*
energyvalx1 += atominfo[atomid].w * (1.0f / sqrtf(dx1*dx1 + dyz2));
energyvalx2 += atominfo[atomid].w * (1.0f / sqrtf(dx2*dx2 + dyz2));
energyvalx3 += atominfo[atomid].w * (1.0f / sqrtf(dx3*dx3 + dyz2));
energyvalx4 += atominfo[atomid].w * (1.0f / sqrtf(dx4*dx4 + dyz2));
energyvalx5 += atominfo[atomid].w * (1.0f / sqrtf(dx5*dx5 + dyz2));
energyvalx6 += atominfo[atomid].w * (1.0f / sqrtf(dx6*dx6 + dyz2));
energyvalx7 += atominfo[atomid].w * (1.0f / sqrtf(dx7*dx7 + dyz2));
energyvalx8 += atominfo[atomid].w * (1.0f / sqrtf(dx8*dx8 + dyz2));
*/
energyvalx1 += atom_info_w * (fast_h2rsqrt(dx1*dx1 + dyz2));
energyvalx2 += atom_info_w * (fast_h2rsqrt(dx2*dx2 + dyz2));
energyvalx3 += atom_info_w * (fast_h2rsqrt(dx3*dx3 + dyz2));
energyvalx4 += atom_info_w * (fast_h2rsqrt(dx4*dx4 + dyz2));
energyvalx5 += atom_info_w * (fast_h2rsqrt(dx5*dx5 + dyz2));
energyvalx6 += atom_info_w * (fast_h2rsqrt(dx6*dx6 + dyz2));
energyvalx7 += atom_info_w * (fast_h2rsqrt(dx7*dx7 + dyz2));
energyvalx8 += atom_info_w * (fast_h2rsqrt(dx8*dx8 + dyz2));
}
float energyvalx1_float = __half2float(energyvalx1.x + energyvalx1.y);
float energyvalx2_float = __half2float(energyvalx2.x + energyvalx2.y);
float energyvalx3_float = __half2float(energyvalx3.x + energyvalx3.y);
float energyvalx4_float = __half2float(energyvalx4.x + energyvalx4.y);
float energyvalx5_float = __half2float(energyvalx5.x + energyvalx5.y);
float energyvalx6_float = __half2float(energyvalx6.x + energyvalx6.y);
float energyvalx7_float = __half2float(energyvalx7.x + energyvalx7.y);
float energyvalx8_float = __half2float(energyvalx8.x + energyvalx8.y);
energygrid[outaddr] += energyvalx1_float;
energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2_float;
energygrid[outaddr+2*BLOCKSIZEX] += energyvalx3_float;
energygrid[outaddr+3*BLOCKSIZEX] += energyvalx4_float;
energygrid[outaddr+4*BLOCKSIZEX] += energyvalx5_float;
energygrid[outaddr+5*BLOCKSIZEX] += energyvalx6_float;
energygrid[outaddr+6*BLOCKSIZEX] += energyvalx7_float;
energygrid[outaddr+7*BLOCKSIZEX] += energyvalx8_float;
}
// This function copies atoms from the CPU to the GPU and
// precalculates (zplane - z)^2 for each atom.
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
if (count > MAXATOMS) {
printf("Atom count exceeds constant buffer storage capacity\n");
return -1;
}
float atompre[4*MAXATOMS];
int i;
for (i=0; i<count*4; i+=4) {
atompre[i ] = atoms[i ];
atompre[i + 1] = atoms[i + 1];
float dz = zplane - atoms[i + 2];
atompre[i + 2] = dz*dz;
atompre[i + 3] = atoms[i + 3];
}
cudaMemcpyToSymbol(atominfo, atompre, count * 4 * sizeof(float), 0);
CUERR // check and clear any existing errors
return 0;
}
|
ce48e790c211654dc539bcc2bbd444dc6c44b0d1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BatchNormalization.cu"
#else
#define DeviceTensor3 THCDeviceTensor<scalar_t, 3>
#define DeviceTensor1 THCDeviceTensor<scalar_t, 1>
template <int Dim>
static THCDeviceTensor<scalar_t, Dim> THNN_(devicetensor)(THCState *state, THCTensor *t) {
if (!t) {
return THCDeviceTensor<scalar_t, Dim>();
}
int inDim = THCTensor_nDimensionLegacyAll(state, t);
if (inDim == Dim) {
return toDeviceTensor<scalar_t, Dim>(state, t);
}
// View in which the last dimensions are collapsed or expanded as needed
THAssert(t->is_contiguous());
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = THTensor_sizeLegacyNoScalars(t, i);
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= THTensor_sizeLegacyNoScalars(t, i);
}
}
return THCDeviceTensor<scalar_t, Dim>(t->data<scalar_t>(), size);
}
void THNN_(BatchNormalization_updateOutput)(
THCState *state, THCTensor *input_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
bool train, double momentum, double eps) {
THCTensor_(resizeAs)(state, output_, input_);
if (train) {
int64_t nInput = THCTensor_(size)(state, input_, 1);
THCTensor_(resize1d)(state, saveMean_, nInput);
THCTensor_(resize1d)(state, saveStd_, nInput);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 output = THNN_(devicetensor)<3>(state, output_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 bias = THNN_(devicetensor)<1>(state, bias_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
hipStream_t s = THCState_getCurrentStream(state);
hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state);
if (!train) {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutputInference_kernel<scalar_t, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutput_kernel<scalar_t, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, output, weight, bias, static_cast<accreal>(eps), static_cast<accreal>(momentum), runningMean, runningVar,
saveMean, saveStd);
}
THCudaCheck(hipGetLastError());
}
void THNN_(BatchNormalization_backward)(
THCState *state, THCTensor *input_, THCTensor *gradOutput_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) {
THCUNN_check_shape(state, input_, gradOutput_);
if (gradInput_) {
THCTensor_(resizeAs)(state, gradInput_, input_);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 gradOutput = THNN_(devicetensor)<3>(state, gradOutput_);
DeviceTensor3 gradInput = THNN_(devicetensor)<3>(state, gradInput_);
DeviceTensor1 gradWeight = THNN_(devicetensor)<1>(state, gradWeight_);
DeviceTensor1 gradBias = THNN_(devicetensor)<1>(state, gradBias_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
hipStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
hipLaunchKernelGGL(( BatchNormalizationBackward_kernel<scalar_t, accreal, DeviceTensor1, DeviceTensor3>) , dim3(blocks), dim3(threads), 0, s,
input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
saveMean, saveStd, train, scale, eps);
THCudaCheck(hipGetLastError());
}
#undef DeviceTensor3
#undef DeviceTensor1
#endif
|
ce48e790c211654dc539bcc2bbd444dc6c44b0d1.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BatchNormalization.cu"
#else
#define DeviceTensor3 THCDeviceTensor<scalar_t, 3>
#define DeviceTensor1 THCDeviceTensor<scalar_t, 1>
template <int Dim>
static THCDeviceTensor<scalar_t, Dim> THNN_(devicetensor)(THCState *state, THCTensor *t) {
if (!t) {
return THCDeviceTensor<scalar_t, Dim>();
}
int inDim = THCTensor_nDimensionLegacyAll(state, t);
if (inDim == Dim) {
return toDeviceTensor<scalar_t, Dim>(state, t);
}
// View in which the last dimensions are collapsed or expanded as needed
THAssert(t->is_contiguous());
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = THTensor_sizeLegacyNoScalars(t, i);
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= THTensor_sizeLegacyNoScalars(t, i);
}
}
return THCDeviceTensor<scalar_t, Dim>(t->data<scalar_t>(), size);
}
void THNN_(BatchNormalization_updateOutput)(
THCState *state, THCTensor *input_, THCTensor *output_,
THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
bool train, double momentum, double eps) {
THCTensor_(resizeAs)(state, output_, input_);
if (train) {
int64_t nInput = THCTensor_(size)(state, input_, 1);
THCTensor_(resize1d)(state, saveMean_, nInput);
THCTensor_(resize1d)(state, saveStd_, nInput);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 output = THNN_(devicetensor)<3>(state, output_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 bias = THNN_(devicetensor)<1>(state, bias_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
cudaStream_t s = THCState_getCurrentStream(state);
cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);
if (!train) {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
BatchNormalizationUpdateOutputInference_kernel<scalar_t, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, output, runningMean, runningVar, weight, bias, eps);
} else {
dim3 blocks(input.getSize(1));
dim3 threads(getNumThreads(input.getSize(2)));
BatchNormalizationUpdateOutput_kernel<scalar_t, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, output, weight, bias, static_cast<accreal>(eps), static_cast<accreal>(momentum), runningMean, runningVar,
saveMean, saveStd);
}
THCudaCheck(cudaGetLastError());
}
void THNN_(BatchNormalization_backward)(
THCState *state, THCTensor *input_, THCTensor *gradOutput_,
THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
THCTensor *saveMean_, THCTensor *saveStd_, bool train, double scale, double eps) {
THCUNN_check_shape(state, input_, gradOutput_);
if (gradInput_) {
THCTensor_(resizeAs)(state, gradInput_, input_);
}
DeviceTensor3 input = THNN_(devicetensor)<3>(state, input_);
DeviceTensor3 gradOutput = THNN_(devicetensor)<3>(state, gradOutput_);
DeviceTensor3 gradInput = THNN_(devicetensor)<3>(state, gradInput_);
DeviceTensor1 gradWeight = THNN_(devicetensor)<1>(state, gradWeight_);
DeviceTensor1 gradBias = THNN_(devicetensor)<1>(state, gradBias_);
DeviceTensor1 weight = THNN_(devicetensor)<1>(state, weight_);
DeviceTensor1 runningMean = THNN_(devicetensor)<1>(state, runningMean_);
DeviceTensor1 runningVar = THNN_(devicetensor)<1>(state, runningVar_);
DeviceTensor1 saveMean = THNN_(devicetensor)<1>(state, saveMean_);
DeviceTensor1 saveStd = THNN_(devicetensor)<1>(state, saveStd_);
cudaStream_t s = THCState_getCurrentStream(state);
dim3 blocks(gradOutput.getSize(1));
dim3 threads(getNumThreads(gradOutput.getSize(2)));
BatchNormalizationBackward_kernel<scalar_t, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
saveMean, saveStd, train, scale, eps);
THCudaCheck(cudaGetLastError());
}
#undef DeviceTensor3
#undef DeviceTensor1
#endif
|
3dc73f23603c273f132f15bbb746125206463795.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::softmax(const Tensor& _input, int dim, const char *name)
{
if (dim < 0)
dim += _input.numDim;
Softmax *sm = new Softmax(*this, _input, dim, name);
layers.push_back(sm);
return sm->outputs[0];
}
SoftmaxMeta::SoftmaxMeta(FFHandler handler,
const Softmax* softmax,
const Domain& input_domain)
: OpMeta(handler)
{
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
checkCUDNN(cudnnSetTensorDescriptorFromDomain(inputTensor, input_domain));
dim = softmax->dim;
profiling = softmax->profiling;
std::strcpy(op_name, softmax->name);
}
Softmax::Softmax(FFModel& model,
const Tensor& _input,
int _dim,
const char* name)
: Op(model, OP_SOFTMAX, name, _input),
dim(_input.numDim-1-_dim)
{
// Currently assume we always perform softmax along the innermost dim
assert(dim == 0);
outputs[0].numDim = _input.numDim;
for (int i = 0; i < _input.numDim; i++) {
outputs[0].adim[i] = _input.adim[i];
}
}
void Softmax::create_weights(FFModel& model)
{
// Do nothing since we don't have weights
}
void Softmax::create_output_and_partition(FFModel& model)
{
int dim = outputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
create_output_and_partition_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dim
assert(false);
}
}
}
template<int NDIM>
void Softmax::create_output_and_partition_with_dim(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
//int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1;
// Currently requires data parallelism for Softmax
assert(num_par_c == 1);
{
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = inputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
}
// Compute partition bound for input
Rect<NDIM> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[0], (IndexSpaceT<NDIM>)task_is, input_lps[0], input_grad_lps[0]);
}
}
/*
regions[0]: input
regions[1]: output
*/
OpMeta* Softmax::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const Softmax* softmax = (Softmax*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
Domain input_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain output_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(input_domain == output_domain);
SoftmaxMeta* m = new SoftmaxMeta(handle, softmax, output_domain);
//checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
return m;
}
__host__
void Softmax::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
ParallelConfig pc;
std::string pcname = name;
ff.config.find_parallel_config(2, pcname, pc);
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
FFHandler handle = ff.handlers[pc.device_ids[idx++]];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(SOFTMAX_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/* static */
void Softmax::forward_kernel(SoftmaxMeta const *m,
float const *input_ptr,
float *output_ptr,
hipStream_t stream)
{
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnSoftmaxForward(m->handle.dnn,
CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha, m->inputTensor, input_ptr,
&beta, m->inputTensor, output_ptr));
}
void Softmax::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return forward_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I): input
regions[1](O): output
*/
template<int NDIM>
__host__
void Softmax::forward_task_with_dim(
const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorR<float, NDIM> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, NDIM> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
hipEvent_t t_start, t_end;
if (m->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start, stream);
}
forward_kernel(m, acc_input.ptr, acc_output.ptr, stream);
if (m->profiling) {
hipEventRecord(t_end, stream);
checkCUDA(hipEventSynchronize(t_end));
//print_tensor<2, float>(acc_input.ptr, acc_input.rect, "[Softmax:forward:input]");
//print_tensor<2, float>(acc_output.ptr, acc_output.rect, "[Softmax:forward:output]");
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("%s [Softmax] forward time = %.2fms\n", m->op_name, elapsed);
}
}
__host__
void Softmax::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(SOFTMAX_FWD_TASK_ID, task_is,
TaskArgument(NULL, 0), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
/* static */
void Softmax::backward_kernel(float *input_grad_ptr,
float const *output_grad_ptr,
size_t num_elements,
hipStream_t stream)
{
checkCUDA(hipMemcpyAsync(input_grad_ptr, output_grad_ptr,
num_elements * sizeof(float),
hipMemcpyDeviceToDevice, stream));
}
void Softmax::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return backward_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I/O): input_grad
regions[1](I): output_grad
*/
// Note that the backward task of softmax is actually a no op (i.e., input_grad = output_grad)
// since the upstream cross_entropy_loss function performs softmax_cross_entropy_loss
// to avoid intermediate zeros
template<int NDIM>
__host__
void Softmax::backward_task_with_dim(
const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorW<float, NDIM> acc_input_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, NDIM> acc_output_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
// make sure the image indices match!
assert(acc_input_grad.rect == acc_output_grad.rect);
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
hipEvent_t t_start, t_end;
if (m->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start, stream);
}
backward_kernel(acc_input_grad.ptr, acc_output_grad.ptr, acc_input_grad.rect.volume(), stream);
if (m->profiling) {
hipEventRecord(t_end, stream);
checkCUDA(hipEventSynchronize(t_end));
//print_tensor<2, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Softmax:backward:output_grad]");
//print_tensor<2, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Softmax:backward:input_grad]");
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
printf("Softmax backward time = %.2fms\n", elapsed);
}
}
__host__
void Softmax::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(SOFTMAX_BWD_TASK_ID, task_is,
TaskArgument(NULL, 0), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
bool Softmax::measure_operator_cost(Simulator* sim,
const ParallelConfig& pc,
CostMetrics& cost_metrics)
{
Tensor sub_output, sub_input;
if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) {
return false;
}
if (!inputs[0].get_input_sub_tensor(pc, sub_input, op_type)) {
return false;
}
SoftmaxMeta *m = new SoftmaxMeta(sim->handler, this, sub_output.get_domain());
sim->free_all();
float *input_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT);
assert (input_ptr != NULL);
float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert (output_ptr != NULL);
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
std::function<void()> forward, backward;
forward = [&] {
forward_kernel(m, input_ptr, output_ptr, stream);
};
if (sim->computationMode == COMP_MODE_TRAINING) {
float* input_grad_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT);
assert(input_grad_ptr != NULL);
float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert (output_grad_ptr != NULL);
backward = [&] {
backward_kernel(input_grad_ptr, output_grad_ptr, sub_output.get_volume(), stream);
};
}
inner_measure_operator_cost(sim, forward, backward, cost_metrics);
if (sim->computationMode == COMP_MODE_TRAINING) {
printf("[Measure Softmax] name(%s) num_elements(%zu) forward_time(%.4lf) backward_time(%.4lf)\n",
name, sub_output.get_volume(),
cost_metrics.forward_time,
cost_metrics.backward_time);
} else {
printf("[Measure Softmax] name(%s) num_elements(%zu) forward_time(%.4lf)\n",
name, sub_output.get_volume(),
cost_metrics.forward_time);
}
// Free softmaxmeta
delete m;
return true;
}
|
3dc73f23603c273f132f15bbb746125206463795.cu
|
/* Copyright 2017 Stanford, NVIDIA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::softmax(const Tensor& _input, int dim, const char *name)
{
if (dim < 0)
dim += _input.numDim;
Softmax *sm = new Softmax(*this, _input, dim, name);
layers.push_back(sm);
return sm->outputs[0];
}
SoftmaxMeta::SoftmaxMeta(FFHandler handler,
const Softmax* softmax,
const Domain& input_domain)
: OpMeta(handler)
{
checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
checkCUDNN(cudnnSetTensorDescriptorFromDomain(inputTensor, input_domain));
dim = softmax->dim;
profiling = softmax->profiling;
std::strcpy(op_name, softmax->name);
}
Softmax::Softmax(FFModel& model,
const Tensor& _input,
int _dim,
const char* name)
: Op(model, OP_SOFTMAX, name, _input),
dim(_input.numDim-1-_dim)
{
// Currently assume we always perform softmax along the innermost dim
assert(dim == 0);
outputs[0].numDim = _input.numDim;
for (int i = 0; i < _input.numDim; i++) {
outputs[0].adim[i] = _input.adim[i];
}
}
void Softmax::create_weights(FFModel& model)
{
// Do nothing since we don't have weights
}
void Softmax::create_output_and_partition(FFModel& model)
{
int dim = outputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
create_output_and_partition_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dim
assert(false);
}
}
}
template<int NDIM>
void Softmax::create_output_and_partition_with_dim(FFModel& model)
{
// Retrieve the task index space for the op
std::string pcname = name;
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1;
//int num_par_n = part_rect.hi[1] - part_rect.lo[1] + 1;
// Currently requires data parallelism for Softmax
assert(num_par_c == 1);
{
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = inputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
}
// Compute partition bound for input
Rect<NDIM> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[0], (IndexSpaceT<NDIM>)task_is, input_lps[0], input_grad_lps[0]);
}
}
/*
regions[0]: input
regions[1]: output
*/
OpMeta* Softmax::init_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
const Softmax* softmax = (Softmax*) task->args;
FFHandler handle = *((const FFHandler*) task->local_args);
Domain input_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain output_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(input_domain == output_domain);
SoftmaxMeta* m = new SoftmaxMeta(handle, softmax, output_domain);
//checkCUDNN(cudnnCreateTensorDescriptor(&m->outputTensor));
return m;
}
__host__
void Softmax::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Rect<2> rect = runtime->get_index_space_domain(ctx, task_is);
ParallelConfig pc;
std::string pcname = name;
ff.config.find_parallel_config(2, pcname, pc);
int idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
FFHandler handle = ff.handlers[pc.device_ids[idx++]];
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler)));
}
IndexLauncher launcher(SOFTMAX_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(Softmax)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_DISCARD, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
idx = 0;
for (PointInRectIterator<2> it(rect); it(); it++) {
meta[idx++] = fm.get_result<OpMeta*>(*it);
}
}
/* static */
void Softmax::forward_kernel(SoftmaxMeta const *m,
float const *input_ptr,
float *output_ptr,
cudaStream_t stream)
{
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnSoftmaxForward(m->handle.dnn,
CUDNN_SOFTMAX_ACCURATE,
CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha, m->inputTensor, input_ptr,
&beta, m->inputTensor, output_ptr));
}
void Softmax::forward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return forward_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I): input
regions[1](O): output
*/
template<int NDIM>
__host__
void Softmax::forward_task_with_dim(
const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorR<float, NDIM> acc_input(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
TensorAccessorW<float, NDIM> acc_output(
regions[1], task->regions[1], FID_DATA, ctx, runtime,
false/*readOutput*/);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
cudaEvent_t t_start, t_end;
if (m->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start, stream);
}
forward_kernel(m, acc_input.ptr, acc_output.ptr, stream);
if (m->profiling) {
cudaEventRecord(t_end, stream);
checkCUDA(cudaEventSynchronize(t_end));
//print_tensor<2, float>(acc_input.ptr, acc_input.rect, "[Softmax:forward:input]");
//print_tensor<2, float>(acc_output.ptr, acc_output.rect, "[Softmax:forward:output]");
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("%s [Softmax] forward time = %.2fms\n", m->op_name, elapsed);
}
}
__host__
void Softmax::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(SOFTMAX_FWD_TASK_ID, task_is,
TaskArgument(NULL, 0), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
/* static */
void Softmax::backward_kernel(float *input_grad_ptr,
float const *output_grad_ptr,
size_t num_elements,
cudaStream_t stream)
{
checkCUDA(cudaMemcpyAsync(input_grad_ptr, output_grad_ptr,
num_elements * sizeof(float),
cudaMemcpyDeviceToDevice, stream));
}
void Softmax::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
switch (in_domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
return backward_task_with_dim<DIM>(task, regions, ctx, runtime);
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
/*
regions[0](I/O): input_grad
regions[1](I): output_grad
*/
// Note that the backward task of softmax is actually a no op (i.e., input_grad = output_grad)
// since the upstream cross_entropy_loss function performs softmax_cross_entropy_loss
// to avoid intermediate zeros
template<int NDIM>
__host__
void Softmax::backward_task_with_dim(
const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 2);
assert(task->regions.size() == 2);
//const Softmax* softmax = (Softmax*) task->args;
const SoftmaxMeta* m = *((SoftmaxMeta**) task->local_args);
TensorAccessorW<float, NDIM> acc_input_grad(
regions[0], task->regions[0], FID_DATA, ctx, runtime,
true/*readOutput*/);
TensorAccessorR<float, NDIM> acc_output_grad(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
// make sure the image indices match!
assert(acc_input_grad.rect == acc_output_grad.rect);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
cudaEvent_t t_start, t_end;
if (m->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start, stream);
}
backward_kernel(acc_input_grad.ptr, acc_output_grad.ptr, acc_input_grad.rect.volume(), stream);
if (m->profiling) {
cudaEventRecord(t_end, stream);
checkCUDA(cudaEventSynchronize(t_end));
//print_tensor<2, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Softmax:backward:output_grad]");
//print_tensor<2, float>(acc_input_grad.ptr, acc_input_grad.rect, "[Softmax:backward:input_grad]");
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
printf("Softmax backward time = %.2fms\n", elapsed);
}
}
__host__
void Softmax::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(SOFTMAX_BWD_TASK_ID, task_is,
TaskArgument(NULL, 0), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(1, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
bool Softmax::measure_operator_cost(Simulator* sim,
const ParallelConfig& pc,
CostMetrics& cost_metrics)
{
Tensor sub_output, sub_input;
if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) {
return false;
}
if (!inputs[0].get_input_sub_tensor(pc, sub_input, op_type)) {
return false;
}
SoftmaxMeta *m = new SoftmaxMeta(sim->handler, this, sub_output.get_domain());
sim->free_all();
float *input_ptr = (float *)sim->allocate(sub_input.get_volume(), DT_FLOAT);
assert (input_ptr != NULL);
float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert (output_ptr != NULL);
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
std::function<void()> forward, backward;
forward = [&] {
forward_kernel(m, input_ptr, output_ptr, stream);
};
if (sim->computationMode == COMP_MODE_TRAINING) {
float* input_grad_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT);
assert(input_grad_ptr != NULL);
float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT);
assert (output_grad_ptr != NULL);
backward = [&] {
backward_kernel(input_grad_ptr, output_grad_ptr, sub_output.get_volume(), stream);
};
}
inner_measure_operator_cost(sim, forward, backward, cost_metrics);
if (sim->computationMode == COMP_MODE_TRAINING) {
printf("[Measure Softmax] name(%s) num_elements(%zu) forward_time(%.4lf) backward_time(%.4lf)\n",
name, sub_output.get_volume(),
cost_metrics.forward_time,
cost_metrics.backward_time);
} else {
printf("[Measure Softmax] name(%s) num_elements(%zu) forward_time(%.4lf)\n",
name, sub_output.get_volume(),
cost_metrics.forward_time);
}
// Free softmaxmeta
delete m;
return true;
}
|
a7653fdba9280ddbd8cc8e06a74d39ba8ef93018.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
struct hardtanhupdateOutput_functor
{
const float max_val_;
const float min_val_;
hardtanhupdateOutput_functor(float min_val, float max_val)
: min_val_(min_val)
, max_val_(max_val)
{}
__device__ void operator()(float *output, const float *input) const
{
if (*input < min_val_)
*output = min_val_;
else if (*input <= max_val_)
*output = *input;
else
*output = max_val_;
}
};
void THNN_CudaHardTanh_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, float min_val, float max_val)
{
THCUNN_assertSameGPU(state, 2, input, output);
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input,
hardtanhupdateOutput_functor(min_val, max_val));
}
struct hardtanhupdateGradInput_functor
{
const float max_val_;
const float min_val_;
hardtanhupdateGradInput_functor(float min_val, float max_val)
: min_val_(min_val)
, max_val_(max_val)
{}
__device__ void operator()(float *gradInput, const float *input, const float *gradOutput) const
{
if (*input < min_val_ || *input > max_val_)
*gradInput = 0;
else
*gradInput = *gradOutput;
}
};
void THNN_CudaHardTanh_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, float min_val, float max_val)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput,
hardtanhupdateGradInput_functor(min_val, max_val));
}
|
a7653fdba9280ddbd8cc8e06a74d39ba8ef93018.cu
|
#include "THCUNN.h"
#include "common.h"
struct hardtanhupdateOutput_functor
{
const float max_val_;
const float min_val_;
hardtanhupdateOutput_functor(float min_val, float max_val)
: min_val_(min_val)
, max_val_(max_val)
{}
__device__ void operator()(float *output, const float *input) const
{
if (*input < min_val_)
*output = min_val_;
else if (*input <= max_val_)
*output = *input;
else
*output = max_val_;
}
};
void THNN_CudaHardTanh_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, float min_val, float max_val)
{
THCUNN_assertSameGPU(state, 2, input, output);
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input,
hardtanhupdateOutput_functor(min_val, max_val));
}
struct hardtanhupdateGradInput_functor
{
const float max_val_;
const float min_val_;
hardtanhupdateGradInput_functor(float min_val, float max_val)
: min_val_(min_val)
, max_val_(max_val)
{}
__device__ void operator()(float *gradInput, const float *input, const float *gradOutput) const
{
if (*input < min_val_ || *input > max_val_)
*gradInput = 0;
else
*gradInput = *gradOutput;
}
};
void THNN_CudaHardTanh_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, float min_val, float max_val)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput,
hardtanhupdateGradInput_functor(min_val, max_val));
}
|
3a3ff39552a8750b930608f89413c835f2d73d2e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <sys/stat.h>
#include <dirent.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <string>
//#include <pthread.h>
//#include "thpool.h"
struct solver { // The variables in the struct are described in the allocate procedure
int* DB, nVars, nClauses, mem_used, mem_fixed, mem_max, maxLemmas, nLemmas, * buffer, nConflicts, * model,
* reason, * falseStack, * _false, * first, * forced, * processed, * assigned, * next, * prev, head, res, fast, slow,
result, file_id;
};
typedef struct {
int files_count;
double parse_time;
double init_time;
double solve_time;
double tot_time;
} Metrics;
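// Sentinel and label values used throughout the solver: END (-9) terminates watch lists,
// UNSAT/SAT are final results, MARK and IMPLIED label literals during conflict analysis.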
enum { END = -9, UNSAT = 0, SAT = 1, MARK = 2, IMPLIED = 6 };
void deviceInfo(){
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
void showMem(){
// show memory usage of GPU
size_t free_byte ;
size_t total_byte ;
hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ;
if ( hipSuccess != cuda_status ){
printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) );
exit(1);
}
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n",
used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
//showMem();
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__
int* getMemory(struct solver* S, int mem_size) { // Allocate memory of size mem_size
if (S->mem_used + mem_size > S->mem_max) { // In case the code is used within a code base
printf("c out of memory\n");
return 0;
}
int* store = (S->DB + S->mem_used); // Compute a pointer to the new memory location
S->mem_used += mem_size; // Update the size of the used memory
return store;
} // Return the pointer
__device__
void unassign(struct solver* S, int lit) { S->_false[lit] = 0; } // Unassign the literal
__device__
void restart(struct solver* S) { // Perform a restart (i.e., unassign all variables)
while (S->assigned > S->forced) unassign(S, *(--S->assigned)); // Remove all unforced false lits from falseStack
S->processed = S->forced;
} // Reset the processed pointer
__device__
void assign(struct solver* S, int* reason, int forced) { // Make the first literal of the reason true
 int lit = reason[0]; // Let lit be the first literal in the reason
S->_false[-lit] = forced ? IMPLIED : 1; // Mark lit as true and IMPLIED if forced
*(S->assigned++) = -lit; // Push it on the assignment stack
S->reason[abs(lit)] = 1 + (int)((reason)-S->DB); // Set the reason clause of lit
S->model[abs(lit)] = (lit > 0);
} // Mark the literal as true in the model
__device__
void addWatch(struct solver* S, int lit, int mem) { // Add a watch pointer to a clause containing lit
S->DB[mem] = S->first[lit]; S->first[lit] = mem;
} // By updating the database and the pointers
__device__
int* addClause(struct solver* S, int* buffer, int size, int irr) { // Adds a clause stored in *in of size size
int i, used = S->mem_used; // Store a pointer to the beginning of the clause
int* clause = getMemory(S, size + 3) + 2; // Allocate memory for the clause in the database
if (size > 1) {
addWatch(S, buffer[0], used); // If the clause is not unit, then add
addWatch(S, buffer[1], used + 1);
} // Two watch pointers to the datastructure
for (i = 0; i < size; i++) clause[i] = buffer[i]; clause[i] = 0; // Copy the clause from the buffer to the database
if (irr) S->mem_fixed = S->mem_used; else S->nLemmas++; // Update the statistics
return clause;
} // Return the pointer to the clause is the database
__device__
void reduceDB(struct solver* S, int k) { // Removes "less useful" lemmas from DB
//printf("Start reduceDB function\n");
while (S->nLemmas > S->maxLemmas) S->maxLemmas += 300; // Allow more lemmas in the future
S->nLemmas = 0; // Reset the number of lemmas
int i; for (i = -S->nVars; i <= S->nVars; i++) { // Loop over the variables
if (i == 0) continue; int* watch = &S->first[i]; // Get the pointer to the first watched clause
while (*watch != END) // As long as there are watched clauses
if (*watch < S->mem_fixed) watch = (S->DB + *watch); // Remove the watch if it points to a lemma
else *watch = S->DB[*watch];
} // Otherwise (meaning an input clause) go to next watch
int old_used = S->mem_used; S->mem_used = S->mem_fixed; // Virtually remove all lemmas
for (i = S->mem_fixed + 2; i < old_used; i += 3) { // While the old memory contains lemmas
int count = 0, head = i; // Get the lemma to which the head is pointing
while (S->DB[i]) {
int lit = S->DB[i++]; // Count the number of literals
if ((lit > 0) == S->model[abs(lit)]) count++;
} // That are satisfied by the current model
if (count < k) addClause(S, S->DB + head, i - head, 0);
}
} // If the latter is smaller than k, add it back
__device__
void bump(struct solver* S, int lit) { // Move the variable to the front of the decision list
//printf("Start bump function\n");
if (S->_false[lit] != IMPLIED) {
S->_false[lit] = MARK; // MARK the literal as involved if not a top-level unit
int var = abs(lit); if (var != S->head) { // In case var is not already the head of the list
S->prev[S->next[var]] = S->prev[var]; // Update the prev link, and
S->next[S->prev[var]] = S->next[var]; // Update the next link, and
S->next[S->head] = var; // Add a next link to the head, and
S->prev[var] = S->head; S->head = var;
}
}
} // Make var the new head
__device__
int implied(struct solver* S, int lit) { // Check if lit(eral) is implied by MARK literals
// printf("Start implied function\n");
if (S->_false[lit] > MARK) return (S->_false[lit] & MARK); // If checked before return old result
if (!S->reason[abs(lit)]) return 0; // In case lit is a decision, it is not implied
int* p = (S->DB + S->reason[abs(lit)] - 1); // Get the reason of lit(eral)
while (*(++p)) // While there are literals in the reason
if ((S->_false[*p] ^ MARK) && !implied(S, *p)) { // Recursively check if non-MARK literals are implied
S->_false[lit] = IMPLIED - 1; return 0;
} // Mark and return not implied (denoted by IMPLIED - 1)
S->_false[lit] = IMPLIED; return 1;
} // Mark and return that the literal is implied
__device__
int* analyze(struct solver* S, int* clause) { // Compute a resolvent from falsified clause
// printf("Start analyze\n");
S->res++; S->nConflicts++; // Bump restarts and update the statistic
while (*clause) bump(S, *(clause++)); // MARK all literals in the falsified clause
while (S->reason[abs(*(--S->assigned))]) { // Loop on variables on falseStack until the last decision
if (S->_false[*S->assigned] == MARK) { // If the tail of the stack is MARK
int* check = S->assigned; // Pointer to check if first-UIP is reached
while (S->_false[*(--check)] != MARK) // Check for a MARK literal before decision
if (!S->reason[abs(*check)]) goto build; // Otherwise it is the first-UIP so break
clause = S->DB + S->reason[abs(*S->assigned)]; // Get the reason and ignore first literal
while (*clause) bump(S, *(clause++));
} // MARK all literals in reason
unassign(S, *S->assigned);
} // Unassign the tail of the stack
build:; int size = 0, lbd = 0, flag = 0; // Build conflict clause; Empty the clause buffer
int* p = S->processed = S->assigned; // Loop from tail to front
while (p >= S->forced) { // Only literals on the stack can be MARKed
if ((S->_false[*p] == MARK) && !implied(S, *p)) { // If MARKed and not implied
S->buffer[size++] = *p; flag = 1;
} // Add literal to conflict clause buffer
if (!S->reason[abs(*p)]) {
lbd += flag; flag = 0; // Increase LBD for a decision with a true flag
if (size == 1) S->processed = p;
} // And update the processed pointer
S->_false[*(p--)] = 1;
} // Reset the MARK flag for all variables on the stack
S->fast -= S->fast >> 5; S->fast += lbd << 15; // Update the fast moving average
S->slow -= S->slow >> 15; S->slow += lbd << 5; // Update the slow moving average
while (S->assigned > S->processed) // Loop over all unprocessed literals
unassign(S, *(S->assigned--)); // Unassign all lits between tail & head
unassign(S, *S->assigned); // Assigned now equal to processed
S->buffer[size] = 0; // Terminate the buffer (and potentially print clause)
return addClause(S, S->buffer, size, 0);
} // Add new conflict clause to redundant DB
__device__
int propagate(struct solver* S) { // Performs unit propagation
int forced = S->reason[abs(*S->processed)]; // Initialize forced flag
while (S->processed < S->assigned) { // While unprocessed false literals
int lit = *(S->processed++); // Get first unprocessed literal
int* watch = &S->first[lit]; // Obtain the first watch pointer
while (*watch != END) { // While there are watched clauses (watched by lit)
int i, unit = 1; // Let's assume that the clause is unit
int* clause = (S->DB + *watch + 1); // Get the clause from DB
if (clause[-2] == 0) clause++; // Set the pointer to the first literal in the clause
if (clause[0] == lit) clause[0] = clause[1]; // Ensure that the other watched literal is in front
for (i = 2; unit && clause[i]; i++) // Scan the non-watched literals
if (!S->_false[clause[i]]) { // When clause[i] is not false, it is either true or unset
clause[1] = clause[i]; clause[i] = lit; // Swap literals
int store = *watch; unit = 0; // Store the old watch
*watch = S->DB[*watch]; // Remove the watch from the list of lit
//printf("add watch\n");
addWatch(S, clause[1], store);
} // Add the watch to the list of clause[1]
if (unit) { // If the clause is indeed unit
//printf("unit\n");
clause[1] = lit; watch = (S->DB + *watch); // Place lit at clause[1] and update next watch
if (S->_false[-clause[0]]) continue; // If the other watched literal is satisfied continue
if (!S->_false[clause[0]]) { // If the other watched literal is falsified,
assign(S, clause, forced);
} // A unit clause is found, and the reason is set
else {
if (forced) { // Found a root level conflict -> UNSAT
//S->result = 0;
return UNSAT;
}
int* lemma = analyze(S, clause); // Analyze the conflict return a conflict clause
if (!lemma[1]) forced = 1; // In case a unit clause is found, set forced flag
assign(S, lemma, forced); break;
}
}
}
} // Assign the conflict clause as a unit
if (forced) S->forced = S->processed; // Set S->forced if applicable
//S->result = 1;
return SAT;
} // Finally, no conflict was found
__global__
void solve(struct solver** multi_s) { // Determine satisfiability
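 // One independent solver instance per block: each block reads its own solver state
 // from multi_s via blockIdx.x and runs the CDCL search loop on it to completion.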
struct solver* S = multi_s[blockIdx.x];
int decision = S->head; S->res = 0; // Initialize the solver
for (;;) { // Main solve loop
int old_nLemmas = S->nLemmas; // Store nLemmas to see whether propagate adds lemmas
int res = propagate(S);
if (res == UNSAT) {
printf("file_%d=UNSAT,vars=%i,clauses=%i,mem=%i,conflicts=%i,lemmas=%i\n", S->file_id, S->nVars, S->nClauses, S->mem_used, S->nConflicts, S->maxLemmas);
      S->result = UNSAT;
//printf("result -->", S->result);
return;
} // Propagation returns UNSAT for a root level conflict
if (S->nLemmas > old_nLemmas) { // If the last decision caused a conflict
decision = S->head; // Reset the decision heuristic to head
if (S->fast > (S->slow / 100) * 125) { // If fast average is substantially larger than slow average
// printf("c restarting after %i conflicts (%i %i) %i\n", S->res, S->fast, S->slow, S->nLemmas > S->maxLemmas);
S->res = 0; S->fast = (S->slow / 100) * 125; restart(S); // Restart and update the averages
if (S->nLemmas > S->maxLemmas) reduceDB(S, 6);
}
} // Reduce the DB when it contains too many lemmas
    while (S->_false[decision] || S->_false[-decision]) { // As long as the temporary decision is assigned
decision = S->prev[decision];
}
//printf("decision: %d \n", decision); // Replace it with the next variable in the decision list
if (decision == 0) {
printf("file_%d=SAT,vars=%i,clauses=%i,mem=%i,conflicts=%i,lemmas=%i\n", S->file_id,S->nVars,S->nClauses,S->mem_used,S->nConflicts,S->maxLemmas);
      S->result = SAT;
//printf("result -->", S->result );
return; // If the end of the list is reached, then a solution is found
}
decision = S->model[decision] ? decision : -decision; // Otherwise, assign the decision variable based on the model
S->_false[-decision] = 1; // Assign the decision literal to true (change to IMPLIED-1?)
*(S->assigned++) = -decision; // And push it on the assigned stack
decision = abs(decision); S->reason[decision] = 0;
}
} // Decisions have no reason clauses
__global__
void init(struct solver* S, int* dev_elements, int nElements, int nVars, int nClauses, int* db,
int* file_id, int DB_MAX_MEM, int CLAUSE_LEARN_MAX_MEM, int INITIAL_MAX_LEMMAS) {
S->file_id = *file_id;
S->nVars = nVars;
S->nClauses = nClauses;
//S->mem_max = 100000; // Set the initial maximum memory
S->mem_max = DB_MAX_MEM; // Set the initial maximum memory
S->mem_used = 0; // The number of integers allocated in the DB
S->nLemmas = 0; // The number of learned clauses -- redundant means learned
 S->nConflicts = 0; // Number of conflicts, used to update the scores
S->maxLemmas = INITIAL_MAX_LEMMAS; // Initial maximum number of learnt clauses
//S->fast = S->slow = 1 << 24; // Initialize the fast and slow moving averages
S->fast = S->slow = CLAUSE_LEARN_MAX_MEM; // Initialize the fast and slow moving averages
S->result = -1;
S->DB = db;
S->model = getMemory(S, S->nVars + 1); // Full assignment of the (Boolean) variables (initially set to false)
S->next = getMemory(S, S->nVars + 1); // Next variable in the heuristic order
S->prev = getMemory(S, S->nVars + 1); // Previous variable in the heuristic order
S->buffer = getMemory(S, S->nVars); // A buffer to store a temporary clause
S->reason = getMemory(S, S->nVars + 1); // Array of clauses
S->falseStack = getMemory(S, S->nVars + 1); // Stack of falsified literals -- this pointer is never changed
S->forced = S->falseStack; // Points inside *falseStack at first decision (unforced literal)
S->processed = S->falseStack; // Points inside *falseStack at first unprocessed literal
S->assigned = S->falseStack; // Points inside *falseStack at last unprocessed literal
S->_false = getMemory(S, 2 * S->nVars + 1);
S->_false += S->nVars; // Labels for variables, non-zero means false
S->first = getMemory(S, 2 * S->nVars + 1);
S->first += S->nVars; // Offset of the first watched clause
S->DB[S->mem_used++] = 0; // Make sure there is a 0 before the clauses are loaded.
 int i; for (i = 1; i <= S->nVars; i++) { // Initialize the main data structures:
S->prev[i] = i - 1;
S->next[i - 1] = i;
S->model[i] = S->_false[-i] = S->_false[i] = 0;
S->first[i] = S->first[-i] = END; // and first (watch pointers).
S->head = S->nVars; // Initialize the head of the double-linked list
}
int nZeros = S->nClauses, size = 0; // Initialize the number of clauses to read
for (int i = 0; i < nElements; i++) { // While there are elements
int lit = 0;
lit = dev_elements[i];
if (!lit) { // If reaching the end of the clause
int* clause = addClause(S, S->buffer, size, 1); // Then add the clause to data_base
if (!size || ((size == 1) && S->_false[clause[0]])) { // Check for empty clause or conflicting unit
printf("\n + UNSAT + \n");
    S->result = UNSAT;
return;
} // If either is found return UNSAT
if ((size == 1) && !S->_false[-clause[0]]) { // Check for a new unit
assign(S, clause, 1);
} // Directly assign new units (forced = 1)
size = 0; --nZeros;
}
else S->buffer[size++] = lit;
}
//printf("\n INITIALIZED \n");
} // Return that no conflict was observed
__host__
static void read_until_new_line(FILE* input) {
int ch;
while ((ch = getc(input)) != '\n')
if (ch == EOF) { printf("parse error: unexpected EOF"); exit(1); }
}
int main(int argc, char** argv) {
if (argc < 5) {
printf("USAGE: ./mcuda <formulas dir> <DB_MAX_MEM> <CLAUSE_LEARN_MAX_MEM> <INITIAL_MAX_LEMMAS>\n");
return 0;
}
hipDeviceReset();
showMem();
//char* directory = "C://microsat//sat";
char* directory = argv[1];
int num_file = 0;
int nVars = 0;
int nClauses = 0;
Metrics exec_metrics = { 0, 0, 0, 0, 0 };
int db_max_mem = atoi(argv[2]);
int clause_learn_max_mem = atoi(argv[3]);
int initial_max_mem = atoi(argv[4]);
printf("DB_MAX_MEM: %d\n", db_max_mem);
printf("CLAUSE_LEARN_MAX_MEM: %d\n", clause_learn_max_mem);
printf("INITIAL_MAX_LEMMAS: %d\n", initial_max_mem);
//deviceInfo();
clock_t start, end;
printf(" Start\n");
start = clock();
DIR* dirp;
struct dirent* entry;
dirp = opendir(directory);
while ((entry = readdir(dirp)) != NULL) {
if (entry->d_type == DT_REG) { /* If the entry is a regular file */
num_file++;
}
}
closedir(dirp);
exec_metrics.files_count = num_file;
// printf(" num file -> %d\n",num_file);
solver** h_multi_struct;
h_multi_struct = (solver**)malloc(num_file * sizeof(solver*));
solver** d_multi_struct;
gpuErrchk(hipMalloc((void**)&d_multi_struct, num_file * sizeof(solver*)));
if (NULL == (dirp = opendir(directory)))
{
printf("Error : Failed to open input directory \n");
return 1;
}
clock_t start_parse = clock();
int* db;
 int mem = sizeof(int) * db_max_mem; //TODO: allocate the memory dynamically
gpuErrchk(hipMalloc((void**)&db, mem * num_file));
// showMem();
int count = 0;
while ((entry = readdir(dirp)))
{
if (!strcmp(entry->d_name, "."))
continue;
if (!strcmp(entry->d_name, ".."))
continue;
char path[100] = ""; //TODO: magic number
strcpy(path, directory);
strcat(path, "//");
strcat(path, entry->d_name);
//printf("file_%d=%s\n", count, entry->d_name);
FILE* input = fopen(path, "r");
if (input == NULL)
{
printf("Error : Failed to open entry file \n");
fclose(input);
return 1;
}
struct solver* dev_s;
gpuErrchk(hipMalloc((void**)&dev_s, sizeof(solver)));
struct stat st;
stat(path, &st);
int size = st.st_size;
//printf("\n size -> %d\n", size);
int* buffer = 0;
buffer = (int*)malloc(size * sizeof(int));
/********* FILE PARSER **************/
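  // DIMACS CNF input: 'c' lines are comments, a "p cnf <nVars> <nClauses>" header follows,
  // then clauses are listed as whitespace-separated literals, each clause terminated by 0.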
int tmp;
while ((tmp = getc(input)) == 'c') read_until_new_line(input);
ungetc(tmp, input);
do {
tmp = fscanf(input, " p cnf %i %i \n", &nVars, &nClauses);
if (tmp > 0 && tmp != EOF) break; tmp = fscanf(input, "%*s\n");
} while (tmp != 2 && tmp != EOF);
int nElements = 0;
do {
int ch = getc(input);
   if (ch == '%') break; // we have % as EOF in some dimacs files
if (ch == ' ' || ch == '\n') continue;
if (ch == 'c') { read_until_new_line(input); continue; }
ungetc(ch, input);
int lit = 0;
tmp = fscanf(input, " %i ", &lit);
buffer[nElements] = lit;
//printf("%d ", lit);
nElements++;
} while (tmp != EOF);
nElements--; // TO CHECK
int* elements = 0;
elements = (int*)malloc(nElements * sizeof(int));
for (int i = 0; i < nElements; i++) {
elements[i] = buffer[i];
}
fclose(input);
/********* FILE PARSER **************/
int* dev_file_id;
gpuErrchk(hipMalloc((void**)&dev_file_id, sizeof(int)));
gpuErrchk(hipMemcpy(dev_file_id, &count, sizeof(int), hipMemcpyHostToDevice));
int* dev_elements;
gpuErrchk(hipMalloc((void**)&dev_elements, nElements * sizeof(int)));
gpuErrchk(hipMemcpy(dev_elements, elements, nElements * sizeof(int), hipMemcpyHostToDevice));
free(buffer);
free(elements);
//hipDeviceSetLimit(hipLimitMallocHeapSize, 128 * 1024 * 1024);
//printf("\n INIT \n");
hipEvent_t d_start_init, d_stop_init;
hipEventCreate(&d_start_init);
hipEventCreate(&d_stop_init);
hipEventRecord(d_start_init, 0);
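  // Each formula gets its own fixed-size slice of the single preallocated device DB.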
int* db_offset = db + (db_max_mem * count);
init << <1, 1 >> > (dev_s, dev_elements, nElements, nVars, nClauses, db_offset, dev_file_id, mem, clause_learn_max_mem, initial_max_mem);
hipEventRecord(d_stop_init, 0);
hipEventSynchronize(d_stop_init);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, d_start_init, d_stop_init); // that's our time!
exec_metrics.init_time += elapsedTime;
// Clean up:
hipEventDestroy(d_start_init);
hipEventDestroy(d_stop_init);
//printf("parsing_file -> %s\n", entry->d_name);
//printf("device_time -> %f s\n", elapsedTime / 1000000);
//exec_metrics.init_time += elapsedTime / 1000000;
gpuErrchk(hipDeviceSynchronize());
//temp
//printf("\n dev_s -> %p\n",dev_s);
h_multi_struct[count] = dev_s;
count++;
}
/*********** end init and parse ***********/
exec_metrics.parse_time = (clock() - start_parse);
hipMemcpy(d_multi_struct, h_multi_struct, num_file * sizeof(solver*), hipMemcpyHostToDevice);
//temp end
printf("\n SOLVE \n");
showMem();
hipEvent_t d_start, d_stop;
hipEventCreate(&d_start);
hipEventCreate(&d_stop);
hipEventRecord(d_start, 0);
// solve<< <1, num_file >> > (d_multi_struct);
solve << <num_file, 1 >> > (d_multi_struct);
hipEventRecord(d_stop, 0);
hipEventSynchronize(d_stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, d_start, d_stop); // that's our time!
// Clean up:
hipEventDestroy(d_start);
hipEventDestroy(d_stop);
//printf("\n total solve time -> %f s\n", elapsedTime / 1000000);
exec_metrics.solve_time = elapsedTime;
hipDeviceSynchronize();
end = clock();
//printf("\n total time: %f s\n", (float)(end - start) / 1000000);
exec_metrics.tot_time = (float)(end - start);
printf("\n+++ metrics +++\n");
showMem();
printf("files count: %d\nparse time (s): %f\ncuda init time (s): %f\ncuda solve time (s): %f\ntot time (s): %f\n\n", exec_metrics.files_count, exec_metrics.parse_time / CLOCKS_PER_SEC, exec_metrics.init_time / 1000, exec_metrics.solve_time / 1000, exec_metrics.tot_time / CLOCKS_PER_SEC);
//printf ("c statistics of %s: mem: %i conflicts: %i max_lemmas: %i\n", argv[1], S.mem_used, S.nConflicts, S.maxLemmas);
//printf("\n END \n");
hipDeviceReset();
return 0;
}
|
3a3ff39552a8750b930608f89413c835f2d73d2e.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <sys/stat.h>
#include <dirent.h>
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <string>
//#include <pthread.h>
//#include "thpool.h"
struct solver { // The variables in the struct are described in the allocate procedure
int* DB, nVars, nClauses, mem_used, mem_fixed, mem_max, maxLemmas, nLemmas, * buffer, nConflicts, * model,
* reason, * falseStack, * _false, * first, * forced, * processed, * assigned, * next, * prev, head, res, fast, slow,
result, file_id;
};
typedef struct {
int files_count;
double parse_time;
double init_time;
double solve_time;
double tot_time;
} Metrics;
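// Sentinel and label values used throughout the solver: END (-9) terminates watch lists,
// UNSAT/SAT are final results, MARK and IMPLIED label literals during conflict analysis.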
enum { END = -9, UNSAT = 0, SAT = 1, MARK = 2, IMPLIED = 6 };
void deviceInfo(){
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
void showMem(){
// show memory usage of GPU
size_t free_byte ;
size_t total_byte ;
cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;
if ( cudaSuccess != cuda_status ){
printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
exit(1);
}
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
printf("GPU memory usage: used = %f, free = %f MB, total = %f MB\n",
used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
//showMem();
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__
int* getMemory(struct solver* S, int mem_size) { // Allocate memory of size mem_size
if (S->mem_used + mem_size > S->mem_max) { // In case the code is used within a code base
printf("c out of memory\n");
return 0;
}
int* store = (S->DB + S->mem_used); // Compute a pointer to the new memory location
S->mem_used += mem_size; // Update the size of the used memory
return store;
} // Return the pointer
__device__
void unassign(struct solver* S, int lit) { S->_false[lit] = 0; } // Unassign the literal
__device__
void restart(struct solver* S) { // Perform a restart (i.e., unassign all variables)
while (S->assigned > S->forced) unassign(S, *(--S->assigned)); // Remove all unforced false lits from falseStack
S->processed = S->forced;
} // Reset the processed pointer
__device__
void assign(struct solver* S, int* reason, int forced) { // Make the first literal of the reason true
 int lit = reason[0]; // Let lit be the first literal in the reason
S->_false[-lit] = forced ? IMPLIED : 1; // Mark lit as true and IMPLIED if forced
*(S->assigned++) = -lit; // Push it on the assignment stack
S->reason[abs(lit)] = 1 + (int)((reason)-S->DB); // Set the reason clause of lit
S->model[abs(lit)] = (lit > 0);
} // Mark the literal as true in the model
__device__
void addWatch(struct solver* S, int lit, int mem) { // Add a watch pointer to a clause containing lit
S->DB[mem] = S->first[lit]; S->first[lit] = mem;
} // By updating the database and the pointers
__device__
int* addClause(struct solver* S, int* buffer, int size, int irr) { // Adds a clause stored in *in of size size
int i, used = S->mem_used; // Store a pointer to the beginning of the clause
int* clause = getMemory(S, size + 3) + 2; // Allocate memory for the clause in the database
if (size > 1) {
addWatch(S, buffer[0], used); // If the clause is not unit, then add
addWatch(S, buffer[1], used + 1);
} // Two watch pointers to the datastructure
for (i = 0; i < size; i++) clause[i] = buffer[i]; clause[i] = 0; // Copy the clause from the buffer to the database
if (irr) S->mem_fixed = S->mem_used; else S->nLemmas++; // Update the statistics
return clause;
} // Return the pointer to the clause in the database
__device__
void reduceDB(struct solver* S, int k) { // Removes "less useful" lemmas from DB
//printf("Start reduceDB function\n");
while (S->nLemmas > S->maxLemmas) S->maxLemmas += 300; // Allow more lemmas in the future
S->nLemmas = 0; // Reset the number of lemmas
int i; for (i = -S->nVars; i <= S->nVars; i++) { // Loop over the variables
if (i == 0) continue; int* watch = &S->first[i]; // Get the pointer to the first watched clause
while (*watch != END) // As long as there are watched clauses
if (*watch < S->mem_fixed) watch = (S->DB + *watch); // Remove the watch if it points to a lemma
else *watch = S->DB[*watch];
} // Otherwise (meaning an input clause) go to next watch
int old_used = S->mem_used; S->mem_used = S->mem_fixed; // Virtually remove all lemmas
for (i = S->mem_fixed + 2; i < old_used; i += 3) { // While the old memory contains lemmas
int count = 0, head = i; // Get the lemma to which the head is pointing
while (S->DB[i]) {
int lit = S->DB[i++]; // Count the number of literals
if ((lit > 0) == S->model[abs(lit)]) count++;
} // That are satisfied by the current model
if (count < k) addClause(S, S->DB + head, i - head, 0);
}
} // If the latter is smaller than k, add it back
__device__
void bump(struct solver* S, int lit) { // Move the variable to the front of the decision list
//printf("Start bump function\n");
if (S->_false[lit] != IMPLIED) {
S->_false[lit] = MARK; // MARK the literal as involved if not a top-level unit
int var = abs(lit); if (var != S->head) { // In case var is not already the head of the list
S->prev[S->next[var]] = S->prev[var]; // Update the prev link, and
S->next[S->prev[var]] = S->next[var]; // Update the next link, and
S->next[S->head] = var; // Add a next link to the head, and
S->prev[var] = S->head; S->head = var;
}
}
} // Make var the new head
__device__
int implied(struct solver* S, int lit) { // Check if lit(eral) is implied by MARK literals
// printf("Start implied function\n");
if (S->_false[lit] > MARK) return (S->_false[lit] & MARK); // If checked before return old result
if (!S->reason[abs(lit)]) return 0; // In case lit is a decision, it is not implied
int* p = (S->DB + S->reason[abs(lit)] - 1); // Get the reason of lit(eral)
while (*(++p)) // While there are literals in the reason
if ((S->_false[*p] ^ MARK) && !implied(S, *p)) { // Recursively check if non-MARK literals are implied
S->_false[lit] = IMPLIED - 1; return 0;
} // Mark and return not implied (denoted by IMPLIED - 1)
S->_false[lit] = IMPLIED; return 1;
} // Mark and return that the literal is implied
__device__
int* analyze(struct solver* S, int* clause) { // Compute a resolvent from falsified clause
// printf("Start analyze\n");
S->res++; S->nConflicts++; // Bump restarts and update the statistic
while (*clause) bump(S, *(clause++)); // MARK all literals in the falsified clause
while (S->reason[abs(*(--S->assigned))]) { // Loop on variables on falseStack until the last decision
if (S->_false[*S->assigned] == MARK) { // If the tail of the stack is MARK
int* check = S->assigned; // Pointer to check if first-UIP is reached
while (S->_false[*(--check)] != MARK) // Check for a MARK literal before decision
if (!S->reason[abs(*check)]) goto build; // Otherwise it is the first-UIP so break
clause = S->DB + S->reason[abs(*S->assigned)]; // Get the reason and ignore first literal
while (*clause) bump(S, *(clause++));
} // MARK all literals in reason
unassign(S, *S->assigned);
} // Unassign the tail of the stack
build:; int size = 0, lbd = 0, flag = 0; // Build conflict clause; Empty the clause buffer
int* p = S->processed = S->assigned; // Loop from tail to front
while (p >= S->forced) { // Only literals on the stack can be MARKed
if ((S->_false[*p] == MARK) && !implied(S, *p)) { // If MARKed and not implied
S->buffer[size++] = *p; flag = 1;
} // Add literal to conflict clause buffer
if (!S->reason[abs(*p)]) {
lbd += flag; flag = 0; // Increase LBD for a decision with a true flag
if (size == 1) S->processed = p;
} // And update the processed pointer
S->_false[*(p--)] = 1;
} // Reset the MARK flag for all variables on the stack
S->fast -= S->fast >> 5; S->fast += lbd << 15; // Update the fast moving average
S->slow -= S->slow >> 15; S->slow += lbd << 5; // Update the slow moving average
while (S->assigned > S->processed) // Loop over all unprocessed literals
unassign(S, *(S->assigned--)); // Unassign all lits between tail & head
unassign(S, *S->assigned); // Assigned now equal to processed
S->buffer[size] = 0; // Terminate the buffer (and potentially print clause)
return addClause(S, S->buffer, size, 0);
} // Add new conflict clause to redundant DB
__device__
int propagate(struct solver* S) { // Performs unit propagation
int forced = S->reason[abs(*S->processed)]; // Initialize forced flag
while (S->processed < S->assigned) { // While unprocessed false literals
int lit = *(S->processed++); // Get first unprocessed literal
int* watch = &S->first[lit]; // Obtain the first watch pointer
while (*watch != END) { // While there are watched clauses (watched by lit)
int i, unit = 1; // Let's assume that the clause is unit
int* clause = (S->DB + *watch + 1); // Get the clause from DB
if (clause[-2] == 0) clause++; // Set the pointer to the first literal in the clause
if (clause[0] == lit) clause[0] = clause[1]; // Ensure that the other watched literal is in front
for (i = 2; unit && clause[i]; i++) // Scan the non-watched literals
if (!S->_false[clause[i]]) { // When clause[i] is not false, it is either true or unset
clause[1] = clause[i]; clause[i] = lit; // Swap literals
int store = *watch; unit = 0; // Store the old watch
*watch = S->DB[*watch]; // Remove the watch from the list of lit
//printf("add watch\n");
addWatch(S, clause[1], store);
} // Add the watch to the list of clause[1]
if (unit) { // If the clause is indeed unit
//printf("unit\n");
clause[1] = lit; watch = (S->DB + *watch); // Place lit at clause[1] and update next watch
if (S->_false[-clause[0]]) continue; // If the other watched literal is satisfied continue
if (!S->_false[clause[0]]) { // If the other watched literal is falsified,
assign(S, clause, forced);
} // A unit clause is found, and the reason is set
else {
if (forced) { // Found a root level conflict -> UNSAT
//S->result = 0;
return UNSAT;
}
int* lemma = analyze(S, clause); // Analyze the conflict return a conflict clause
if (!lemma[1]) forced = 1; // In case a unit clause is found, set forced flag
assign(S, lemma, forced); break;
}
}
}
} // Assign the conflict clause as a unit
if (forced) S->forced = S->processed; // Set S->forced if applicable
//S->result = 1;
return SAT;
} // Finally, no conflict was found
__global__
void solve(struct solver** multi_s) { // Determine satisfiability
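 // One independent solver instance per block: each block reads its own solver state
 // from multi_s via blockIdx.x and runs the CDCL search loop on it to completion.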
struct solver* S = multi_s[blockIdx.x];
int decision = S->head; S->res = 0; // Initialize the solver
for (;;) { // Main solve loop
int old_nLemmas = S->nLemmas; // Store nLemmas to see whether propagate adds lemmas
int res = propagate(S);
if (res == UNSAT) {
printf("file_%d=UNSAT,vars=%i,clauses=%i,mem=%i,conflicts=%i,lemmas=%i\n", S->file_id, S->nVars, S->nClauses, S->mem_used, S->nConflicts, S->maxLemmas);
      S->result = UNSAT;
//printf("result -->", S->result);
return;
} // Propagation returns UNSAT for a root level conflict
if (S->nLemmas > old_nLemmas) { // If the last decision caused a conflict
decision = S->head; // Reset the decision heuristic to head
if (S->fast > (S->slow / 100) * 125) { // If fast average is substantially larger than slow average
// printf("c restarting after %i conflicts (%i %i) %i\n", S->res, S->fast, S->slow, S->nLemmas > S->maxLemmas);
S->res = 0; S->fast = (S->slow / 100) * 125; restart(S); // Restart and update the averages
if (S->nLemmas > S->maxLemmas) reduceDB(S, 6);
}
} // Reduce the DB when it contains too many lemmas
    while (S->_false[decision] || S->_false[-decision]) { // As long as the temporary decision is assigned
decision = S->prev[decision];
}
//printf("decision: %d \n", decision); // Replace it with the next variable in the decision list
if (decision == 0) {
printf("file_%d=SAT,vars=%i,clauses=%i,mem=%i,conflicts=%i,lemmas=%i\n", S->file_id,S->nVars,S->nClauses,S->mem_used,S->nConflicts,S->maxLemmas);
      S->result = SAT;
//printf("result -->", S->result );
return; // If the end of the list is reached, then a solution is found
}
decision = S->model[decision] ? decision : -decision; // Otherwise, assign the decision variable based on the model
S->_false[-decision] = 1; // Assign the decision literal to true (change to IMPLIED-1?)
*(S->assigned++) = -decision; // And push it on the assigned stack
decision = abs(decision); S->reason[decision] = 0;
}
} // Decisions have no reason clauses
__global__
void init(struct solver* S, int* dev_elements, int nElements, int nVars, int nClauses, int* db,
int* file_id, int DB_MAX_MEM, int CLAUSE_LEARN_MAX_MEM, int INITIAL_MAX_LEMMAS) {
S->file_id = *file_id;
S->nVars = nVars;
S->nClauses = nClauses;
//S->mem_max = 100000; // Set the initial maximum memory
S->mem_max = DB_MAX_MEM; // Set the initial maximum memory
S->mem_used = 0; // The number of integers allocated in the DB
S->nLemmas = 0; // The number of learned clauses -- redundant means learned
 S->nConflicts = 0; // Number of conflicts, used to update the scores
S->maxLemmas = INITIAL_MAX_LEMMAS; // Initial maximum number of learnt clauses
//S->fast = S->slow = 1 << 24; // Initialize the fast and slow moving averages
S->fast = S->slow = CLAUSE_LEARN_MAX_MEM; // Initialize the fast and slow moving averages
S->result = -1;
S->DB = db;
S->model = getMemory(S, S->nVars + 1); // Full assignment of the (Boolean) variables (initially set to false)
S->next = getMemory(S, S->nVars + 1); // Next variable in the heuristic order
S->prev = getMemory(S, S->nVars + 1); // Previous variable in the heuristic order
S->buffer = getMemory(S, S->nVars); // A buffer to store a temporary clause
S->reason = getMemory(S, S->nVars + 1); // Array of clauses
S->falseStack = getMemory(S, S->nVars + 1); // Stack of falsified literals -- this pointer is never changed
S->forced = S->falseStack; // Points inside *falseStack at first decision (unforced literal)
S->processed = S->falseStack; // Points inside *falseStack at first unprocessed literal
S->assigned = S->falseStack; // Points inside *falseStack at last unprocessed literal
S->_false = getMemory(S, 2 * S->nVars + 1);
S->_false += S->nVars; // Labels for variables, non-zero means false
S->first = getMemory(S, 2 * S->nVars + 1);
S->first += S->nVars; // Offset of the first watched clause
S->DB[S->mem_used++] = 0; // Make sure there is a 0 before the clauses are loaded.
 int i; for (i = 1; i <= S->nVars; i++) { // Initialize the main data structures:
S->prev[i] = i - 1;
S->next[i - 1] = i;
S->model[i] = S->_false[-i] = S->_false[i] = 0;
S->first[i] = S->first[-i] = END; // and first (watch pointers).
S->head = S->nVars; // Initialize the head of the double-linked list
}
int nZeros = S->nClauses, size = 0; // Initialize the number of clauses to read
for (int i = 0; i < nElements; i++) { // While there are elements
int lit = 0;
lit = dev_elements[i];
if (!lit) { // If reaching the end of the clause
int* clause = addClause(S, S->buffer, size, 1); // Then add the clause to data_base
if (!size || ((size == 1) && S->_false[clause[0]])) { // Check for empty clause or conflicting unit
printf("\n + UNSAT + \n");
    S->result = UNSAT;
return;
} // If either is found return UNSAT
if ((size == 1) && !S->_false[-clause[0]]) { // Check for a new unit
assign(S, clause, 1);
} // Directly assign new units (forced = 1)
size = 0; --nZeros;
}
else S->buffer[size++] = lit;
}
//printf("\n INITIALIZED \n");
} // Return that no conflict was observed
__host__
static void read_until_new_line(FILE* input) {
int ch;
while ((ch = getc(input)) != '\n')
if (ch == EOF) { printf("parse error: unexpected EOF"); exit(1); }
}
int main(int argc, char** argv) {
if (argc < 5) {
printf("USAGE: ./mcuda <formulas dir> <DB_MAX_MEM> <CLAUSE_LEARN_MAX_MEM> <INITIAL_MAX_LEMMAS>\n");
return 0;
}
cudaDeviceReset();
showMem();
//char* directory = "C://microsat//sat";
char* directory = argv[1];
int num_file = 0;
int nVars = 0;
int nClauses = 0;
Metrics exec_metrics = { 0, 0, 0, 0, 0 };
int db_max_mem = atoi(argv[2]);
int clause_learn_max_mem = atoi(argv[3]);
int initial_max_mem = atoi(argv[4]);
printf("DB_MAX_MEM: %d\n", db_max_mem);
printf("CLAUSE_LEARN_MAX_MEM: %d\n", clause_learn_max_mem);
printf("INITIAL_MAX_LEMMAS: %d\n", initial_max_mem);
//deviceInfo();
clock_t start, end;
printf(" Start\n");
start = clock();
DIR* dirp;
struct dirent* entry;
dirp = opendir(directory);
while ((entry = readdir(dirp)) != NULL) {
if (entry->d_type == DT_REG) { /* If the entry is a regular file */
num_file++;
}
}
closedir(dirp);
exec_metrics.files_count = num_file;
// printf(" num file -> %d\n",num_file);
solver** h_multi_struct;
h_multi_struct = (solver**)malloc(num_file * sizeof(solver*));
solver** d_multi_struct;
gpuErrchk(cudaMalloc((void**)&d_multi_struct, num_file * sizeof(solver*)));
if (NULL == (dirp = opendir(directory)))
{
printf("Error : Failed to open input directory \n");
return 1;
}
clock_t start_parse = clock();
int* db;
 int mem = sizeof(int) * db_max_mem; //TODO: allocate the memory dynamically
gpuErrchk(cudaMalloc((void**)&db, mem * num_file));
// showMem();
int count = 0;
while ((entry = readdir(dirp)))
{
if (!strcmp(entry->d_name, "."))
continue;
if (!strcmp(entry->d_name, ".."))
continue;
char path[100] = ""; //TODO: magic number
strcpy(path, directory);
strcat(path, "//");
strcat(path, entry->d_name);
//printf("file_%d=%s\n", count, entry->d_name);
FILE* input = fopen(path, "r");
if (input == NULL)
{
printf("Error : Failed to open entry file \n");
fclose(input);
return 1;
}
struct solver* dev_s;
gpuErrchk(cudaMalloc((void**)&dev_s, sizeof(solver)));
struct stat st;
stat(path, &st);
int size = st.st_size;
//printf("\n size -> %d\n", size);
int* buffer = 0;
buffer = (int*)malloc(size * sizeof(int));
/********* FILE PARSER **************/
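  // DIMACS CNF input: 'c' lines are comments, a "p cnf <nVars> <nClauses>" header follows,
  // then clauses are listed as whitespace-separated literals, each clause terminated by 0.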
int tmp;
while ((tmp = getc(input)) == 'c') read_until_new_line(input);
ungetc(tmp, input);
do {
tmp = fscanf(input, " p cnf %i %i \n", &nVars, &nClauses);
if (tmp > 0 && tmp != EOF) break; tmp = fscanf(input, "%*s\n");
} while (tmp != 2 && tmp != EOF);
int nElements = 0;
do {
int ch = getc(input);
   if (ch == '%') break; // we have % as EOF in some dimacs files
if (ch == ' ' || ch == '\n') continue;
if (ch == 'c') { read_until_new_line(input); continue; }
ungetc(ch, input);
int lit = 0;
tmp = fscanf(input, " %i ", &lit);
buffer[nElements] = lit;
//printf("%d ", lit);
nElements++;
} while (tmp != EOF);
nElements--; // TO CHECK
int* elements = 0;
elements = (int*)malloc(nElements * sizeof(int));
for (int i = 0; i < nElements; i++) {
elements[i] = buffer[i];
}
fclose(input);
/********* FILE PARSER **************/
int* dev_file_id;
gpuErrchk(cudaMalloc((void**)&dev_file_id, sizeof(int)));
gpuErrchk(cudaMemcpy(dev_file_id, &count, sizeof(int), cudaMemcpyHostToDevice));
int* dev_elements;
gpuErrchk(cudaMalloc((void**)&dev_elements, nElements * sizeof(int)));
gpuErrchk(cudaMemcpy(dev_elements, elements, nElements * sizeof(int), cudaMemcpyHostToDevice));
free(buffer);
free(elements);
//cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128 * 1024 * 1024);
//printf("\n INIT \n");
cudaEvent_t d_start_init, d_stop_init;
cudaEventCreate(&d_start_init);
cudaEventCreate(&d_stop_init);
cudaEventRecord(d_start_init, 0);
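  // Each formula gets its own fixed-size slice of the single preallocated device DB.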
int* db_offset = db + (db_max_mem * count);
init << <1, 1 >> > (dev_s, dev_elements, nElements, nVars, nClauses, db_offset, dev_file_id, mem, clause_learn_max_mem, initial_max_mem);
cudaEventRecord(d_stop_init, 0);
cudaEventSynchronize(d_stop_init);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, d_start_init, d_stop_init); // that's our time!
exec_metrics.init_time += elapsedTime;
// Clean up:
cudaEventDestroy(d_start_init);
cudaEventDestroy(d_stop_init);
//printf("parsing_file -> %s\n", entry->d_name);
//printf("device_time -> %f s\n", elapsedTime / 1000000);
//exec_metrics.init_time += elapsedTime / 1000000;
gpuErrchk(cudaDeviceSynchronize());
//temp
//printf("\n dev_s -> %p\n",dev_s);
h_multi_struct[count] = dev_s;
count++;
}
/*********** end init and parse ***********/
exec_metrics.parse_time = (clock() - start_parse);
cudaMemcpy(d_multi_struct, h_multi_struct, num_file * sizeof(solver*), cudaMemcpyHostToDevice);
//temp end
printf("\n SOLVE \n");
showMem();
cudaEvent_t d_start, d_stop;
cudaEventCreate(&d_start);
cudaEventCreate(&d_stop);
cudaEventRecord(d_start, 0);
// solve<< <1, num_file >> > (d_multi_struct);
solve << <num_file, 1 >> > (d_multi_struct);
cudaEventRecord(d_stop, 0);
cudaEventSynchronize(d_stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, d_start, d_stop); // that's our time!
// Clean up:
cudaEventDestroy(d_start);
cudaEventDestroy(d_stop);
//printf("\n total solve time -> %f s\n", elapsedTime / 1000000);
exec_metrics.solve_time = elapsedTime;
cudaDeviceSynchronize();
end = clock();
//printf("\n total time: %f s\n", (float)(end - start) / 1000000);
exec_metrics.tot_time = (float)(end - start);
printf("\n+++ metrics +++\n");
showMem();
printf("files count: %d\nparse time (s): %f\ncuda init time (s): %f\ncuda solve time (s): %f\ntot time (s): %f\n\n", exec_metrics.files_count, exec_metrics.parse_time / CLOCKS_PER_SEC, exec_metrics.init_time / 1000, exec_metrics.solve_time / 1000, exec_metrics.tot_time / CLOCKS_PER_SEC);
//printf ("c statistics of %s: mem: %i conflicts: %i max_lemmas: %i\n", argv[1], S.mem_used, S.nConflicts, S.maxLemmas);
//printf("\n END \n");
cudaDeviceReset();
return 0;
}
|
0531d5ed86c580fe5bf9fa5001c70f86cc6d2675.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <string>
#include <opencv2\core.hpp>
#include <opencv2\highgui.hpp>
#include <opencv2\imgproc.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define MAX_THREADS 32
using namespace std;
using namespace cv;
__global__ void threshold(uchar *input, size_t inputPitch, int rows, int cols, uchar *output, uchar thresholdValue) {
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
if (row < rows&&col < cols) {
uchar *pixelValue = (uchar*)((char*)input + row*inputPitch) + col;
uchar *outputPixelValue = (uchar*)((char*)output + row*inputPitch) + col;
if (*pixelValue < thresholdValue) {
*outputPixelValue = 0;
}
else {
*outputPixelValue = 255;
}
}
}
void thresholdImage(const Mat & input, Mat & output, uchar threholdValue) {
output = Mat(input.size(), CV_8U, Scalar(0));
uchar *d_input, *d_output;
size_t inputPitch, outputPitch;
hipMallocPitch(&d_input, &inputPitch, sizeof(uchar)*input.cols, input.rows);
hipMallocPitch(&d_output, &outputPitch, sizeof(uchar)*output.cols, output.rows);
hipStream_t inputCopy, outputCopy;
hipStreamCreate(&inputCopy); hipStreamCreate(&outputCopy);
hipMemcpy2DAsync(d_input, inputPitch, input.data, sizeof(uchar)*input.cols, sizeof(uchar)*input.cols, input.rows, hipMemcpyHostToDevice, inputCopy);
hipMemcpy2DAsync(d_output, outputPitch, output.data, sizeof(uchar)*output.cols, sizeof(uchar)*output.cols, output.rows, hipMemcpyHostToDevice, outputCopy);
hipStreamSynchronize(inputCopy); hipStreamSynchronize(outputCopy);
dim3 blockSize(input.cols / MAX_THREADS + 1, input.rows / MAX_THREADS + 1);
dim3 threadSize(MAX_THREADS, MAX_THREADS);
hipLaunchKernelGGL(( threshold), dim3(blockSize), dim3(threadSize), 0, 0, d_input, inputPitch, input.rows, input.cols, d_output, threholdValue);
hipError_t error = hipDeviceSynchronize();
if (error != hipSuccess) {
cout << hipGetErrorString(error) << endl;
}
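    // Note: the kernel and the copy below index d_output with inputPitch; this relies on the two
    // pitches matching, which is expected here since both allocations have the same row width and type.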
hipMemcpy2D(output.data, output.cols * sizeof(uchar), d_output, inputPitch, output.cols * sizeof(uchar), output.rows, hipMemcpyDeviceToHost);
hipStreamDestroy(inputCopy); hipStreamDestroy(outputCopy);
hipFree(d_input); hipFree(d_output);
}
int main() {
string path = "type-c.jpg";
Mat img = imread(path, IMREAD_GRAYSCALE);
Mat img1 = imread(path);
Mat result;
hipEvent_t start, end;
hipEventCreate(&start); hipEventCreate(&end);
hipEventRecord(start);
thresholdImage(img, result, 50);
hipEventRecord(end);
hipEventSynchronize(start); hipEventSynchronize(end);
float time;
hipEventElapsedTime(&time, start, end);
hipEventDestroy(start); hipEventDestroy(end);
cout << "time cost on cpu: " << time << " ms." << endl;
Mat th;
double cpuStart = (double)getTickCount();
threshold(img, th, 50, 255, img.type());
double cpuEnd = (double)getTickCount();
double cpuTime = (cpuEnd - cpuStart) / getTickFrequency();
cout << "time cost on cpu: " << cpuTime * 1000 << " ms." << endl;
string title = "CUDA";
namedWindow(title);
imshow(title, result);
imshow("CPU", th);
waitKey(0);
imwrite("threshold.jpg", result);
return 0;
}
|
0531d5ed86c580fe5bf9fa5001c70f86cc6d2675.cu
|
#include <stdio.h>
#include <iostream>
#include <string>
#include <opencv2\core.hpp>
#include <opencv2\highgui.hpp>
#include <opencv2\imgproc.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define MAX_THREADS 32
using namespace std;
using namespace cv;
__global__ void threshold(uchar *input, size_t inputPitch, int rows, int cols, uchar *output, uchar thresholdValue) {
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
if (row < rows&&col < cols) {
uchar *pixelValue = (uchar*)((char*)input + row*inputPitch) + col;
uchar *outputPixelValue = (uchar*)((char*)output + row*inputPitch) + col;
if (*pixelValue < thresholdValue) {
*outputPixelValue = 0;
}
else {
*outputPixelValue = 255;
}
}
}
void thresholdImage(const Mat & input, Mat & output, uchar threholdValue) {
output = Mat(input.size(), CV_8U, Scalar(0));
uchar *d_input, *d_output;
size_t inputPitch, outputPitch;
cudaMallocPitch(&d_input, &inputPitch, sizeof(uchar)*input.cols, input.rows);
cudaMallocPitch(&d_output, &outputPitch, sizeof(uchar)*output.cols, output.rows);
cudaStream_t inputCopy, outputCopy;
cudaStreamCreate(&inputCopy); cudaStreamCreate(&outputCopy);
cudaMemcpy2DAsync(d_input, inputPitch, input.data, sizeof(uchar)*input.cols, sizeof(uchar)*input.cols, input.rows, cudaMemcpyHostToDevice, inputCopy);
cudaMemcpy2DAsync(d_output, outputPitch, output.data, sizeof(uchar)*output.cols, sizeof(uchar)*output.cols, output.rows, cudaMemcpyHostToDevice, outputCopy);
cudaStreamSynchronize(inputCopy); cudaStreamSynchronize(outputCopy);
dim3 blockSize(input.cols / MAX_THREADS + 1, input.rows / MAX_THREADS + 1);
dim3 threadSize(MAX_THREADS, MAX_THREADS);
threshold<<<blockSize, threadSize>>> (d_input, inputPitch, input.rows, input.cols, d_output, threholdValue);
cudaError_t error = cudaDeviceSynchronize();
if (error != cudaSuccess) {
cout << cudaGetErrorString(error) << endl;
}
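    // Note: the kernel and the copy below index d_output with inputPitch; this relies on the two
    // pitches matching, which is expected here since both allocations have the same row width and type.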
cudaMemcpy2D(output.data, output.cols * sizeof(uchar), d_output, inputPitch, output.cols * sizeof(uchar), output.rows, cudaMemcpyDeviceToHost);
cudaStreamDestroy(inputCopy); cudaStreamDestroy(outputCopy);
cudaFree(d_input); cudaFree(d_output);
}
int main() {
string path = "type-c.jpg";
Mat img = imread(path, IMREAD_GRAYSCALE);
Mat img1 = imread(path);
Mat result;
cudaEvent_t start, end;
cudaEventCreate(&start); cudaEventCreate(&end);
cudaEventRecord(start);
thresholdImage(img, result, 50);
cudaEventRecord(end);
cudaEventSynchronize(start); cudaEventSynchronize(end);
float time;
cudaEventElapsedTime(&time, start, end);
cudaEventDestroy(start); cudaEventDestroy(end);
cout << "time cost on cpu: " << time << " ms." << endl;
Mat th;
double cpuStart = (double)getTickCount();
threshold(img, th, 50, 255, img.type());
double cpuEnd = (double)getTickCount();
double cpuTime = (cpuEnd - cpuStart) / getTickFrequency();
cout << "time cost on cpu: " << cpuTime * 1000 << " ms." << endl;
string title = "CUDA";
namedWindow(title);
imshow(title, result);
imshow("CPU", th);
waitKey(0);
imwrite("threshold.jpg", result);
return 0;
}
|
b4fd707baa200d89f3d117ccfda00c41cef337a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/gpu/device/lbp.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/saturate_cast.hpp>
namespace cv { namespace gpu { namespace device
{
namespace lbp
{
struct LBP
{
__host__ __device__ __forceinline__ LBP() {}
__device__ __forceinline__ int operator() (const int* integral, int ty, int fh, int fw, int& shift) const
{
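                // Evaluate the LBP feature on the integral image: compute the 3x3 grid of cell
                // sums spanned by four rows/columns of corner samples, compare the eight border
                // cells against the centre cell, and pack the comparison bits into the returned
                // response and the 'shift' output.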
int anchors[9];
anchors[0] = integral[ty];
anchors[1] = integral[ty + fw];
anchors[0] -= anchors[1];
anchors[2] = integral[ty + fw * 2];
anchors[1] -= anchors[2];
anchors[2] -= integral[ty + fw * 3];
ty += fh;
anchors[3] = integral[ty];
anchors[4] = integral[ty + fw];
anchors[3] -= anchors[4];
anchors[5] = integral[ty + fw * 2];
anchors[4] -= anchors[5];
anchors[5] -= integral[ty + fw * 3];
                anchors[5] -= integral[ty + fw * 3];
                anchors[0] -= anchors[3];
                anchors[1] -= anchors[4];
                anchors[2] -= anchors[5];
                // 0 - 2 contains s0 - s2
                ty += fh;
                anchors[6] = integral[ty];
                anchors[7] = integral[ty + fw];
                anchors[6] -= anchors[7];
                anchors[8] = integral[ty + fw * 2];
                anchors[7] -= anchors[8];
                anchors[8] -= integral[ty + fw * 3];
                anchors[3] -= anchors[6];
                anchors[4] -= anchors[7];
                anchors[5] -= anchors[8];
                // 3 - 5 contains s3 - s5
                anchors[0] -= anchors[4];
                anchors[1] -= anchors[4];
                anchors[2] -= anchors[4];
                anchors[3] -= anchors[4];
                anchors[5] -= anchors[4];
                int response = (~(anchors[0] >> 31)) & 4;
                response |= (~(anchors[1] >> 31)) & 2;
response |= (~(anchors[2] >> 31)) & 1;
shift = (~(anchors[5] >> 31)) & 16;
shift |= (~(anchors[3] >> 31)) & 1;
ty += fh;
anchors[0] = integral[ty];
anchors[1] = integral[ty + fw];
anchors[0] -= anchors[1];
anchors[2] = integral[ty + fw * 2];
anchors[1] -= anchors[2];
anchors[2] -= integral[ty + fw * 3];
anchors[6] -= anchors[0];
anchors[7] -= anchors[1];
anchors[8] -= anchors[2];
// 0 -2 contains s6 - s8
anchors[6] -= anchors[4];
anchors[7] -= anchors[4];
anchors[8] -= anchors[4];
shift |= (~(anchors[6] >> 31)) & 2;
shift |= (~(anchors[7] >> 31)) & 4;
shift |= (~(anchors[8] >> 31)) & 8;
return response;
}
};
template<typename Pr>
__global__ void disjoin(int4* candidates, int4* objects, unsigned int n, int groupThreshold, float grouping_eps, unsigned int* nclasses)
{
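            // Group candidate rectangles: partition them into equivalence classes with the given
            // predicate, average the rectangles of each class in shared memory, and emit one
            // rectangle per class that gathered at least groupThreshold members.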
unsigned int tid = threadIdx.x;
extern __shared__ int sbuff[];
int* labels = sbuff;
int* rrects = sbuff + n;
Pr predicate(grouping_eps);
partition(candidates, n, labels, predicate);
rrects[tid * 4 + 0] = 0;
rrects[tid * 4 + 1] = 0;
rrects[tid * 4 + 2] = 0;
rrects[tid * 4 + 3] = 0;
__syncthreads();
int cls = labels[tid];
Emulation::smem::atomicAdd((rrects + cls * 4 + 0), candidates[tid].x);
Emulation::smem::atomicAdd((rrects + cls * 4 + 1), candidates[tid].y);
Emulation::smem::atomicAdd((rrects + cls * 4 + 2), candidates[tid].z);
Emulation::smem::atomicAdd((rrects + cls * 4 + 3), candidates[tid].w);
__syncthreads();
labels[tid] = 0;
__syncthreads();
Emulation::smem::atomicInc((unsigned int*)labels + cls, n);
__syncthreads();
*nclasses = 0;
int active = labels[tid];
if (active)
{
int* r1 = rrects + tid * 4;
float s = 1.f / active;
r1[0] = saturate_cast<int>(r1[0] * s);
r1[1] = saturate_cast<int>(r1[1] * s);
r1[2] = saturate_cast<int>(r1[2] * s);
r1[3] = saturate_cast<int>(r1[3] * s);
}
__syncthreads();
if (active && active >= groupThreshold)
{
int* r1 = rrects + tid * 4;
int4 r_out = make_int4(r1[0], r1[1], r1[2], r1[3]);
int aidx = Emulation::smem::atomicInc(nclasses, n);
objects[aidx] = r_out;
}
}
void connectedConmonents(DevMem2D_<int4> candidates, int ncandidates, DevMem2D_<int4> objects, int groupThreshold, float grouping_eps, unsigned int* nclasses)
{
int block = ncandidates;
int smem = block * ( sizeof(int) + sizeof(int4) );
hipLaunchKernelGGL(( disjoin<InSameComponint>), dim3(1), dim3(block), smem, 0, candidates, objects, ncandidates, groupThreshold, grouping_eps, nclasses);
cudaSafeCall( hipGetLastError() );
}
struct Cascade
{
__host__ __device__ __forceinline__ Cascade(const Stage* _stages, int _nstages, const ClNode* _nodes, const float* _leaves,
const int* _subsets, const uchar4* _features, int _subsetSize)
: stages(_stages), nstages(_nstages), nodes(_nodes), leaves(_leaves), subsets(_subsets), features(_features), subsetSize(_subsetSize){}
__device__ __forceinline__ bool operator() (int y, int x, int* integral, const int pitch) const
{
int current_node = 0;
int current_leave = 0;
for (int s = 0; s < nstages; ++s)
{
float sum = 0;
Stage stage = stages[s];
for (int t = 0; t < stage.ntrees; t++)
{
ClNode node = nodes[current_node];
uchar4 feature = features[node.featureIdx];
int shift;
int c = evaluator(integral, (y + feature.y) * pitch + x + feature.x, feature.w * pitch, feature.z, shift);
int idx = (subsets[ current_node * subsetSize + c] & ( 1 << shift)) ? current_leave : current_leave + 1;
sum += leaves[idx];
current_node += 1;
current_leave += 2;
}
if (sum < stage.threshold)
return false;
}
return true;
}
const Stage* stages;
const int nstages;
const ClNode* nodes;
const float* leaves;
const int* subsets;
const uchar4* features;
const int subsetSize;
const LBP evaluator;
};
// stepShift, scale, width_k, sum_prev => y = sum_prev + tid_k / width_k, x = tid_k - tid_k / width_k
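// Each thread id addresses one sliding-window position across the whole scale pyramid: the
// while loop below subtracts the per-scale window count (stotal) from the id until it falls
// inside one scale, accumulating in wshift the horizontal offset of that scale's stripe in
// the packed integral image. The remainder is then split into (x, y) window coordinates, and
// `step` doubles the scan stride at fine scales (scale <= 2.f) so the densest scales are
// sampled every other pixel.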
__global__ void lbp_cascade(const Cascade cascade, int frameW, int frameH, int windowW, int windowH, float scale, const float factor,
const int total, int* integral, const int pitch, DevMem2D_<int4> objects, unsigned int* classified)
{
int ftid = blockIdx.x * blockDim.x + threadIdx.x;
if (ftid >= total) return;
int step = (scale <= 2.f);
int windowsForLine = (__float2int_rn( __fdividef(frameW, scale)) - windowW) >> step;
int stotal = windowsForLine * ( (__float2int_rn( __fdividef(frameH, scale)) - windowH) >> step);
int wshift = 0;
int scaleTid = ftid;
while (scaleTid >= stotal)
{
scaleTid -= stotal;
wshift += __float2int_rn(__fdividef(frameW, scale)) + 1;
scale *= factor;
step = (scale <= 2.f);
windowsForLine = ( ((__float2int_rn(__fdividef(frameW, scale)) - windowW) >> step));
stotal = windowsForLine * ( (__float2int_rn(__fdividef(frameH, scale)) - windowH) >> step);
}
int y = __fdividef(scaleTid, windowsForLine);
int x = scaleTid - y * windowsForLine;
x <<= step;
y <<= step;
if (cascade(y, x + wshift, integral, pitch))
{
if(x >= __float2int_rn(__fdividef(frameW, scale)) - windowW) return;
int4 rect;
rect.x = __float2int_rn(x * scale);
rect.y = __float2int_rn(y * scale);
rect.z = __float2int_rn(windowW * scale);
rect.w = __float2int_rn(windowH * scale);
int res = atomicInc(classified, (unsigned int)objects.cols);
objects(0, res) = rect;
}
}
void classifyPyramid(int frameW, int frameH, int windowW, int windowH, float initialScale, float factor, int workAmount,
const DevMem2Db& mstages, const int nstages, const DevMem2Di& mnodes, const DevMem2Df& mleaves, const DevMem2Di& msubsets, const DevMem2Db& mfeatures,
const int subsetSize, DevMem2D_<int4> objects, unsigned int* classified, DevMem2Di integral)
{
const int block = 128;
int grid = divUp(workAmount, block);
hipFuncSetCacheConfig(lbp_cascade, hipFuncCachePreferL1);
Cascade cascade((Stage*)mstages.ptr(), nstages, (ClNode*)mnodes.ptr(), mleaves.ptr(), msubsets.ptr(), (uchar4*)mfeatures.ptr(), subsetSize);
hipLaunchKernelGGL(( lbp_cascade), dim3(grid), dim3(block), 0, 0, cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), integral.step / sizeof(int), objects, classified);
}
}
}}}
|
b4fd707baa200d89f3d117ccfda00c41cef337a8.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/gpu/device/lbp.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/saturate_cast.hpp>
namespace cv { namespace gpu { namespace device
{
namespace lbp
{
struct LBP
{
__host__ __device__ __forceinline__ LBP() {}
__device__ __forceinline__ int operator() (const int* integral, int ty, int fh, int fw, int& shift) const
{
int anchors[9];
anchors[0] = integral[ty];
anchors[1] = integral[ty + fw];
anchors[0] -= anchors[1];
anchors[2] = integral[ty + fw * 2];
anchors[1] -= anchors[2];
anchors[2] -= integral[ty + fw * 3];
ty += fh;
anchors[3] = integral[ty];
anchors[4] = integral[ty + fw];
anchors[3] -= anchors[4];
anchors[5] = integral[ty + fw * 2];
anchors[4] -= anchors[5];
anchors[5] -= integral[ty + fw * 3];
anchors[0] -= anchors[3];
anchors[1] -= anchors[4];
anchors[2] -= anchors[5];
// 0 - 2 contains s0 - s2
ty += fh;
anchors[6] = integral[ty];
anchors[7] = integral[ty + fw];
anchors[6] -= anchors[7];
anchors[8] = integral[ty + fw * 2];
anchors[7] -= anchors[8];
anchors[8] -= integral[ty + fw * 3];
anchors[3] -= anchors[6];
anchors[4] -= anchors[7];
anchors[5] -= anchors[8];
// 3 - 5 contains s3 - s5
anchors[0] -= anchors[4];
anchors[1] -= anchors[4];
anchors[2] -= anchors[4];
anchors[3] -= anchors[4];
anchors[5] -= anchors[4];
int response = (~(anchors[0] >> 31)) & 4;
response |= (~(anchors[1] >> 31)) & 2;
response |= (~(anchors[2] >> 31)) & 1;
shift = (~(anchors[5] >> 31)) & 16;
shift |= (~(anchors[3] >> 31)) & 1;
ty += fh;
anchors[0] = integral[ty];
anchors[1] = integral[ty + fw];
anchors[0] -= anchors[1];
anchors[2] = integral[ty + fw * 2];
anchors[1] -= anchors[2];
anchors[2] -= integral[ty + fw * 3];
anchors[6] -= anchors[0];
anchors[7] -= anchors[1];
anchors[8] -= anchors[2];
// 0 - 2 contains s6 - s8
anchors[6] -= anchors[4];
anchors[7] -= anchors[4];
anchors[8] -= anchors[4];
shift |= (~(anchors[6] >> 31)) & 2;
shift |= (~(anchors[7] >> 31)) & 4;
shift |= (~(anchors[8] >> 31)) & 8;
return response;
}
};
template<typename Pr>
__global__ void disjoin(int4* candidates, int4* objects, unsigned int n, int groupThreshold, float grouping_eps, unsigned int* nclasses)
{
unsigned int tid = threadIdx.x;
extern __shared__ int sbuff[];
int* labels = sbuff;
int* rrects = sbuff + n;
Pr predicate(grouping_eps);
partition(candidates, n, labels, predicate);
rrects[tid * 4 + 0] = 0;
rrects[tid * 4 + 1] = 0;
rrects[tid * 4 + 2] = 0;
rrects[tid * 4 + 3] = 0;
__syncthreads();
int cls = labels[tid];
Emulation::smem::atomicAdd((rrects + cls * 4 + 0), candidates[tid].x);
Emulation::smem::atomicAdd((rrects + cls * 4 + 1), candidates[tid].y);
Emulation::smem::atomicAdd((rrects + cls * 4 + 2), candidates[tid].z);
Emulation::smem::atomicAdd((rrects + cls * 4 + 3), candidates[tid].w);
__syncthreads();
labels[tid] = 0;
__syncthreads();
Emulation::smem::atomicInc((unsigned int*)labels + cls, n);
__syncthreads();
*nclasses = 0;
int active = labels[tid];
if (active)
{
int* r1 = rrects + tid * 4;
float s = 1.f / active;
r1[0] = saturate_cast<int>(r1[0] * s);
r1[1] = saturate_cast<int>(r1[1] * s);
r1[2] = saturate_cast<int>(r1[2] * s);
r1[3] = saturate_cast<int>(r1[3] * s);
}
__syncthreads();
if (active && active >= groupThreshold)
{
int* r1 = rrects + tid * 4;
int4 r_out = make_int4(r1[0], r1[1], r1[2], r1[3]);
int aidx = Emulation::smem::atomicInc(nclasses, n);
objects[aidx] = r_out;
}
}
void connectedConmonents(DevMem2D_<int4> candidates, int ncandidates, DevMem2D_<int4> objects, int groupThreshold, float grouping_eps, unsigned int* nclasses)
{
int block = ncandidates;
int smem = block * ( sizeof(int) + sizeof(int4) );
disjoin<InSameComponint><<<1, block, smem>>>(candidates, objects, ncandidates, groupThreshold, grouping_eps, nclasses);
cudaSafeCall( cudaGetLastError() );
}
struct Cascade
{
__host__ __device__ __forceinline__ Cascade(const Stage* _stages, int _nstages, const ClNode* _nodes, const float* _leaves,
const int* _subsets, const uchar4* _features, int _subsetSize)
: stages(_stages), nstages(_nstages), nodes(_nodes), leaves(_leaves), subsets(_subsets), features(_features), subsetSize(_subsetSize){}
__device__ __forceinline__ bool operator() (int y, int x, int* integral, const int pitch) const
{
int current_node = 0;
int current_leave = 0;
for (int s = 0; s < nstages; ++s)
{
float sum = 0;
Stage stage = stages[s];
for (int t = 0; t < stage.ntrees; t++)
{
ClNode node = nodes[current_node];
uchar4 feature = features[node.featureIdx];
int shift;
int c = evaluator(integral, (y + feature.y) * pitch + x + feature.x, feature.w * pitch, feature.z, shift);
int idx = (subsets[ current_node * subsetSize + c] & ( 1 << shift)) ? current_leave : current_leave + 1;
sum += leaves[idx];
current_node += 1;
current_leave += 2;
}
if (sum < stage.threshold)
return false;
}
return true;
}
const Stage* stages;
const int nstages;
const ClNode* nodes;
const float* leaves;
const int* subsets;
const uchar4* features;
const int subsetSize;
const LBP evaluator;
};
// stepShift, scale, width_k, sum_prev => y = sum_prev + tid_k / width_k, x = tid_k - tid_k / width_k
__global__ void lbp_cascade(const Cascade cascade, int frameW, int frameH, int windowW, int windowH, float scale, const float factor,
const int total, int* integral, const int pitch, DevMem2D_<int4> objects, unsigned int* classified)
{
int ftid = blockIdx.x * blockDim.x + threadIdx.x;
if (ftid >= total) return;
int step = (scale <= 2.f);
int windowsForLine = (__float2int_rn( __fdividef(frameW, scale)) - windowW) >> step;
int stotal = windowsForLine * ( (__float2int_rn( __fdividef(frameH, scale)) - windowH) >> step);
int wshift = 0;
int scaleTid = ftid;
while (scaleTid >= stotal)
{
scaleTid -= stotal;
wshift += __float2int_rn(__fdividef(frameW, scale)) + 1;
scale *= factor;
step = (scale <= 2.f);
windowsForLine = ( ((__float2int_rn(__fdividef(frameW, scale)) - windowW) >> step));
stotal = windowsForLine * ( (__float2int_rn(__fdividef(frameH, scale)) - windowH) >> step);
}
int y = __fdividef(scaleTid, windowsForLine);
int x = scaleTid - y * windowsForLine;
x <<= step;
y <<= step;
if (cascade(y, x + wshift, integral, pitch))
{
if(x >= __float2int_rn(__fdividef(frameW, scale)) - windowW) return;
int4 rect;
rect.x = __float2int_rn(x * scale);
rect.y = __float2int_rn(y * scale);
rect.z = __float2int_rn(windowW * scale);
rect.w = __float2int_rn(windowH * scale);
int res = atomicInc(classified, (unsigned int)objects.cols);
objects(0, res) = rect;
}
}
void classifyPyramid(int frameW, int frameH, int windowW, int windowH, float initialScale, float factor, int workAmount,
const DevMem2Db& mstages, const int nstages, const DevMem2Di& mnodes, const DevMem2Df& mleaves, const DevMem2Di& msubsets, const DevMem2Db& mfeatures,
const int subsetSize, DevMem2D_<int4> objects, unsigned int* classified, DevMem2Di integral)
{
const int block = 128;
int grid = divUp(workAmount, block);
cudaFuncSetCacheConfig(lbp_cascade, cudaFuncCachePreferL1);
Cascade cascade((Stage*)mstages.ptr(), nstages, (ClNode*)mnodes.ptr(), mleaves.ptr(), msubsets.ptr(), (uchar4*)mfeatures.ptr(), subsetSize);
lbp_cascade<<<grid, block>>>(cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), integral.step / sizeof(int), objects, classified);
}
}
}}}
|
341f61936d519de708dba0b6b6a7948026242332.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <nvgraph_gdf.h>
#include <thrust/device_vector.h>
#include <ctime>
gdf_error nvgraph2gdf_error(nvgraphStatus_t nvg_stat)
{
switch (nvg_stat) {
case NVGRAPH_STATUS_SUCCESS:
return GDF_SUCCESS;
case NVGRAPH_STATUS_NOT_INITIALIZED:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_INVALID_VALUE:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
return GDF_UNSUPPORTED_DTYPE;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
return GDF_INVALID_API_CALL;
default:
return GDF_CUDA_ERROR;
}
}
gdf_error nvgraph2gdf_error_verbose(nvgraphStatus_t nvg_stat)
{
switch (nvg_stat) {
case NVGRAPH_STATUS_NOT_INITIALIZED:
std::cerr << "nvGRAPH not initialized"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ALLOC_FAILED:
std::cerr << "nvGRAPH alloc failed"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INVALID_VALUE:
std::cerr << "nvGRAPH invalid value"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ARCH_MISMATCH:
std::cerr << "nvGRAPH arch mismatch"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_MAPPING_ERROR:
std::cerr << "nvGRAPH mapping error"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_EXECUTION_FAILED:
std::cerr << "nvGRAPH execution failed"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INTERNAL_ERROR:
std::cerr << "nvGRAPH internal error"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH type not supported"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_NOT_CONVERGED:
std::cerr << "nvGRAPH algorithm failed to converge"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH graph type not supported"; return GDF_CUDA_ERROR;
default:
std::cerr << "Unknown nvGRAPH Status"; return GDF_CUDA_ERROR;
}
}
#ifdef VERBOSE
#define NVG_TRY(call) \
{ \
if ((call)!=NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error_verbose((call)); \
}
#else
#define NVG_TRY(call) \
{ \
if ((call)!=NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error((call)); \
}
#endif
gdf_error gdf_createGraph_nvgraph(nvgraphHandle_t nvg_handle, gdf_graph* gdf_G, nvgraphGraphDescr_t* nvgraph_G, bool use_transposed) {
// check input
GDF_REQUIRE(!((gdf_G->edgeList == nullptr) &&
(gdf_G->adjList == nullptr) &&
(gdf_G->transposedAdjList == nullptr)),
GDF_INVALID_API_CALL);
nvgraphTopologyType_t TT;
hipDataType settype;
// create an nvgraph graph handle
NVG_TRY(nvgraphCreateGraphDescr(nvg_handle, nvgraph_G));
// setup nvgraph variables
if (use_transposed) {
// convert edgeList to transposedAdjList
if (gdf_G->transposedAdjList == nullptr) {
GDF_TRY(gdf_add_transpose(gdf_G));
}
// use the existing transposedAdjList if it exists and adjList is missing
TT = NVGRAPH_CSC_32;
nvgraphCSCTopology32I_st topoData;
topoData.nvertices = gdf_G->transposedAdjList->offsets->size -1;
topoData.nedges = gdf_G->transposedAdjList->indices->size;
topoData.destination_offsets = (int *) gdf_G->transposedAdjList->offsets->data;
topoData.source_indices = (int *) gdf_G->transposedAdjList->indices->data;
// attach the transposed adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void *)&topoData, TT));
//attach edge values
if (gdf_G->transposedAdjList->edge_data) {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = HIP_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (float *) gdf_G->transposedAdjList->edge_data->data));
break;
case GDF_FLOAT64:
settype = HIP_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (double *) gdf_G->transposedAdjList->edge_data->data));
break;
default: return GDF_UNSUPPORTED_DTYPE;
}
}
}
else {
// convert edgeList to adjList
if (gdf_G->adjList == nullptr) {
GDF_TRY(gdf_add_adj_list(gdf_G));
}
TT = NVGRAPH_CSR_32;
nvgraphCSRTopology32I_st topoData;
topoData.nvertices = gdf_G->adjList->offsets->size -1;
topoData.nedges = gdf_G->adjList->indices->size;
topoData.source_offsets = (int *) gdf_G->adjList->offsets->data;
topoData.destination_indices = (int *) gdf_G->adjList->indices->data;
// attach adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void *)&topoData, TT));
//attach edge values
if (gdf_G->adjList->edge_data) {
switch (gdf_G->adjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = HIP_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (float *) gdf_G->adjList->edge_data->data));
break;
case GDF_FLOAT64:
settype = HIP_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (double *) gdf_G->adjList->edge_data->data));
break;
default: return GDF_UNSUPPORTED_DTYPE;
}
}
}
return GDF_SUCCESS;
}
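// gdf_sssp_nvgraph below drives single-source shortest paths through nvGRAPH: it creates a
// handle, builds a CSC (transposed) graph descriptor with gdf_createGraph_nvgraph, attaches
// edge weights (a vector of 1.0f when the graph carries no edge data), binds sssp_distances
// as vertex data set 0, runs nvgraphSssp from *source_vert, and destroys the descriptor and
// handle. The std::clock() brackets print comma-separated per-phase timings in milliseconds.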
gdf_error gdf_sssp_nvgraph(gdf_graph *gdf_G,
const int *source_vert,
gdf_column *sssp_distances) {
std::clock_t start;
GDF_REQUIRE( gdf_G != nullptr , GDF_INVALID_API_CALL );
GDF_REQUIRE( sssp_distances != nullptr , GDF_INVALID_API_CALL );
GDF_REQUIRE( sssp_distances->data != nullptr , GDF_INVALID_API_CALL );
GDF_REQUIRE( !sssp_distances->valid , GDF_VALIDITY_UNSUPPORTED );
GDF_REQUIRE( sssp_distances->size > 0 , GDF_INVALID_API_CALL );
GDF_REQUIRE( *source_vert >= 0 , GDF_INVALID_API_CALL );
GDF_REQUIRE( *source_vert < sssp_distances->size , GDF_INVALID_API_CALL );
// init nvgraph
// TODO : time this call
nvgraphHandle_t nvg_handle = 0;
nvgraphGraphDescr_t nvgraph_G = 0;
hipDataType settype;
start = std::clock();
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, true));
std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms
int sssp_index = 0;
int weight_index = 0;
thrust::device_vector<float> d_val;
start = std::clock();
if (gdf_G->transposedAdjList->edge_data == nullptr) {
// use a fp32 vector [1,...,1]
settype = HIP_R_32F;
d_val.resize(gdf_G->transposedAdjList->indices->size);
thrust::fill(d_val.begin(), d_val.end(), 1.0);
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, nvgraph_G, weight_index, settype, (void *) thrust::raw_pointer_cast(d_val.data())));
}
else {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = HIP_R_32F;
break;
case GDF_FLOAT64:
settype = HIP_R_64F;
break;
default: return GDF_UNSUPPORTED_DTYPE;
}
}
NVG_TRY(nvgraphAttachVertexData(nvg_handle, nvgraph_G, 0, settype, sssp_distances->data ));
std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms
start = std::clock();
NVG_TRY(nvgraphSssp(nvg_handle, nvgraph_G, weight_index, source_vert, sssp_index));
std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms
start = std::clock();
NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
NVG_TRY(nvgraphDestroy(nvg_handle));
std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) <<std::endl; // in ms
return GDF_SUCCESS;
}
|
341f61936d519de708dba0b6b6a7948026242332.cu
|
#include <nvgraph_gdf.h>
#include <thrust/device_vector.h>
#include <ctime>
gdf_error nvgraph2gdf_error(nvgraphStatus_t nvg_stat)
{
switch (nvg_stat) {
case NVGRAPH_STATUS_SUCCESS:
return GDF_SUCCESS;
case NVGRAPH_STATUS_NOT_INITIALIZED:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_INVALID_VALUE:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
return GDF_UNSUPPORTED_DTYPE;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
return GDF_INVALID_API_CALL;
default:
return GDF_CUDA_ERROR;
}
}
gdf_error nvgraph2gdf_error_verbose(nvgraphStatus_t nvg_stat)
{
switch (nvg_stat) {
case NVGRAPH_STATUS_NOT_INITIALIZED:
std::cerr << "nvGRAPH not initialized"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ALLOC_FAILED:
std::cerr << "nvGRAPH alloc failed"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INVALID_VALUE:
std::cerr << "nvGRAPH invalid value"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ARCH_MISMATCH:
std::cerr << "nvGRAPH arch mismatch"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_MAPPING_ERROR:
std::cerr << "nvGRAPH mapping error"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_EXECUTION_FAILED:
std::cerr << "nvGRAPH execution failed"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INTERNAL_ERROR:
std::cerr << "nvGRAPH internal error"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH type not supported"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_NOT_CONVERGED:
std::cerr << "nvGRAPH algorithm failed to converge"; return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH graph type not supported"; return GDF_CUDA_ERROR;
default:
std::cerr << "Unknown nvGRAPH Status"; return GDF_CUDA_ERROR;
}
}
#ifdef VERBOSE
#define NVG_TRY(call) \
{ \
if ((call)!=NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error_verbose((call)); \
}
#else
#define NVG_TRY(call) \
{ \
if ((call)!=NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error((call)); \
}
#endif
gdf_error gdf_createGraph_nvgraph(nvgraphHandle_t nvg_handle, gdf_graph* gdf_G, nvgraphGraphDescr_t* nvgraph_G, bool use_transposed) {
// check input
GDF_REQUIRE(!((gdf_G->edgeList == nullptr) &&
(gdf_G->adjList == nullptr) &&
(gdf_G->transposedAdjList == nullptr)),
GDF_INVALID_API_CALL);
nvgraphTopologyType_t TT;
cudaDataType_t settype;
// create an nvgraph graph handle
NVG_TRY(nvgraphCreateGraphDescr(nvg_handle, nvgraph_G));
// setup nvgraph variables
if (use_transposed) {
// convert edgeList to transposedAdjList
if (gdf_G->transposedAdjList == nullptr) {
GDF_TRY(gdf_add_transpose(gdf_G));
}
// use the existing transposedAdjList if it exists and adjList is missing
TT = NVGRAPH_CSC_32;
nvgraphCSCTopology32I_st topoData;
topoData.nvertices = gdf_G->transposedAdjList->offsets->size -1;
topoData.nedges = gdf_G->transposedAdjList->indices->size;
topoData.destination_offsets = (int *) gdf_G->transposedAdjList->offsets->data;
topoData.source_indices = (int *) gdf_G->transposedAdjList->indices->data;
// attach the transposed adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void *)&topoData, TT));
//attach edge values
if (gdf_G->transposedAdjList->edge_data) {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = CUDA_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (float *) gdf_G->transposedAdjList->edge_data->data));
break;
case GDF_FLOAT64:
settype = CUDA_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (double *) gdf_G->transposedAdjList->edge_data->data));
break;
default: return GDF_UNSUPPORTED_DTYPE;
}
}
}
else {
// convert edgeList to adjList
if (gdf_G->adjList == nullptr) {
GDF_TRY(gdf_add_adj_list(gdf_G));
}
TT = NVGRAPH_CSR_32;
nvgraphCSRTopology32I_st topoData;
topoData.nvertices = gdf_G->adjList->offsets->size -1;
topoData.nedges = gdf_G->adjList->indices->size;
topoData.source_offsets = (int *) gdf_G->adjList->offsets->data;
topoData.destination_indices = (int *) gdf_G->adjList->indices->data;
// attach adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvgraph_G, (void *)&topoData, TT));
//attach edge values
if (gdf_G->adjList->edge_data) {
switch (gdf_G->adjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = CUDA_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (float *) gdf_G->adjList->edge_data->data));
break;
case GDF_FLOAT64:
settype = CUDA_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, *nvgraph_G, 0, settype, (double *) gdf_G->adjList->edge_data->data));
break;
default: return GDF_UNSUPPORTED_DTYPE;
}
}
}
return GDF_SUCCESS;
}
gdf_error gdf_sssp_nvgraph(gdf_graph *gdf_G,
const int *source_vert,
gdf_column *sssp_distances) {
std::clock_t start;
GDF_REQUIRE( gdf_G != nullptr , GDF_INVALID_API_CALL );
GDF_REQUIRE( sssp_distances != nullptr , GDF_INVALID_API_CALL );
GDF_REQUIRE( sssp_distances->data != nullptr , GDF_INVALID_API_CALL );
GDF_REQUIRE( !sssp_distances->valid , GDF_VALIDITY_UNSUPPORTED );
GDF_REQUIRE( sssp_distances->size > 0 , GDF_INVALID_API_CALL );
GDF_REQUIRE( *source_vert >= 0 , GDF_INVALID_API_CALL );
GDF_REQUIRE( *source_vert < sssp_distances->size , GDF_INVALID_API_CALL );
// init nvgraph
// TODO : time this call
nvgraphHandle_t nvg_handle = 0;
nvgraphGraphDescr_t nvgraph_G = 0;
cudaDataType_t settype;
start = std::clock();
NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, true));
std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms
int sssp_index = 0;
int weight_index = 0;
thrust::device_vector<float> d_val;
start = std::clock();
if (gdf_G->transposedAdjList->edge_data == nullptr) {
// use a fp32 vector [1,...,1]
settype = CUDA_R_32F;
d_val.resize(gdf_G->transposedAdjList->indices->size);
thrust::fill(d_val.begin(), d_val.end(), 1.0);
NVG_TRY(nvgraphAttachEdgeData(nvg_handle, nvgraph_G, weight_index, settype, (void *) thrust::raw_pointer_cast(d_val.data())));
}
else {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = CUDA_R_32F;
break;
case GDF_FLOAT64:
settype = CUDA_R_64F;
break;
default: return GDF_UNSUPPORTED_DTYPE;
}
}
NVG_TRY(nvgraphAttachVertexData(nvg_handle, nvgraph_G, 0, settype, sssp_distances->data ));
std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms
start = std::clock();
NVG_TRY(nvgraphSssp(nvg_handle, nvgraph_G, weight_index, source_vert, sssp_index));
std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) << ","; // in ms
start = std::clock();
NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
NVG_TRY(nvgraphDestroy(nvg_handle));
std::cout << (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000) <<std::endl; // in ms
return GDF_SUCCESS;
}
|
e879249f7ca2afc6f9f03a98026b68fbd5429945.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_init(int *mapad, int max, int size)
{
/* Necessary thread and block identifiers */
int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
if (position<size) mapad[position] = max;
}
|
e879249f7ca2afc6f9f03a98026b68fbd5429945.cu
|
#include "includes.h"
__global__ void gpu_init(int *mapad, int max, int size)
{
/* Necessary thread and block identifiers */
int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
if (position<size) mapad[position] = max;
}
|
0d41bf5b1b433c7ea6d646f3b89a1667748caee7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "cs_cuda.h"
#include "cs_helper.h"
#include <stdlib.h>
// #define CUDA_DBG
// #define CUDA_DBG1
__global__ void d_expand_frame ( int *d_input, int *d_output,
int xdim, int ydim,
int xadd, int yadd, int zadd, int num_of_frames,
int o_frsize, int n_frsize, int size )
{
int frame_idx, frame_left, o_x, o_y ;
int t_idx = blockIdx.x*blockDim.x + threadIdx.x;
while ( t_idx < size )
{
frame_idx = t_idx / n_frsize ;
if ( frame_idx >= num_of_frames )
d_output[ t_idx ] = 0 ;
else
{
frame_left = t_idx % n_frsize ;
o_x = frame_left % ( xdim + xadd * 2 ) ;
if ( o_x < xadd )
o_x = 0 ;
else if ( o_x >= ( xdim + xadd ))
o_x = xdim - 1 ;
else
o_x -= xadd ;
o_y = frame_left / ( xdim + xadd * 2 ) ;
if ( o_y < yadd )
o_y = 0 ;
else if ( o_y >= ( yadd + ydim ))
o_y = ydim - 1 ;
else
o_y -= yadd ;
d_output[ t_idx ] = d_input[ frame_idx * o_frsize +
o_y * xdim + o_x ] ;
}
t_idx += CUDA_MAX_THREADS ;
}
}
/*
h_expand_frame: expand the current frames in a block
this is supposed to be the first step after pix are copied into
the device memory d_input
d_input : device address of input
should have size ( xdim * ydim * num_of_frames ) ;
d_output: device address of output
should have size ( xdim + xadd * 2 ) * ( ydim + yadd * 2 ) *
( num_of_frames + zadd )
xdim : frame H size
ydim : frame V size
xadd : size of H pix added on each side
yadd : size of V pix added on each side
zadd : size of T pix added at the end , content value is 0
num_of_frames : with data in d_input
*/
void
h_expand_frame ( int *d_input, int *d_output, int xdim, int ydim,
int xadd, int yadd, int zadd, int num_of_frames )
{
int o_framesize, n_framesize, n ;
int nThreadsPerBlock = 512;
int nBlocks ;
#ifdef CUDA_DBG1
fprintf( stderr, "%s: d_input %p dout %p x/y %d %d add x/y/z %d %d %d z %d\n",
__func__, d_input, d_output, xdim, ydim, xadd, yadd, zadd,
num_of_frames ) ;
#endif
o_framesize = xdim * ydim ;
n_framesize = ( xdim + xadd * 2 ) * ( ydim + yadd * 2 ) ;
n = n_framesize * ( num_of_frames + zadd ) ;
#ifdef CUDA_DBG
fprintf( stderr, "%s: old %d new %d n %d \n",
__func__, o_framesize, n_framesize, n ) ;
#endif
h_block_adj ( n, nThreadsPerBlock, &nBlocks ) ;
// nBlocks = ( n + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
hipLaunchKernelGGL(( d_expand_frame) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
d_input, d_output,
xdim, ydim, xadd, yadd, zadd, num_of_frames,
o_framesize, n_framesize, n ) ;
hipDeviceSynchronize() ;
}
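/*
rough usage sketch ( illustrative only ; d_in, d_out and the sizes below are
assumptions, not part of this file ) : expand 8 frames of 640x480 pixels by 2 pixels
on each side in H and V, plus one zero-filled frame appended in T
int *d_in, *d_out ;
hipMalloc(( void ** ) &d_in, 640 * 480 * 8 * sizeof ( int )) ;
hipMalloc(( void ** ) &d_out, ( 640 + 2 * 2 ) * ( 480 + 2 * 2 ) * ( 8 + 1 ) * sizeof ( int )) ;
... copy the 8 input frames into d_in ...
h_expand_frame ( d_in, d_out, 640, 480, 2, 2, 1, 8 ) ;
*/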
|
0d41bf5b1b433c7ea6d646f3b89a1667748caee7.cu
|
#include <stdio.h>
#include "cs_cuda.h"
#include "cs_helper.h"
#include <stdlib.h>
// #define CUDA_DBG
// #define CUDA_DBG1
__global__ void d_expand_frame ( int *d_input, int *d_output,
int xdim, int ydim,
int xadd, int yadd, int zadd, int num_of_frames,
int o_frsize, int n_frsize, int size )
{
int frame_idx, frame_left, o_x, o_y ;
int t_idx = blockIdx.x*blockDim.x + threadIdx.x;
while ( t_idx < size )
{
frame_idx = t_idx / n_frsize ;
if ( frame_idx >= num_of_frames )
d_output[ t_idx ] = 0 ;
else
{
frame_left = t_idx % n_frsize ;
o_x = frame_left % ( xdim + xadd * 2 ) ;
if ( o_x < xadd )
o_x = 0 ;
else if ( o_x >= ( xdim + xadd ))
o_x = xdim - 1 ;
else
o_x -= xadd ;
o_y = frame_left / ( xdim + xadd * 2 ) ;
if ( o_y < yadd )
o_y = 0 ;
else if ( o_y >= ( yadd + ydim ))
o_y = ydim - 1 ;
else
o_y -= yadd ;
d_output[ t_idx ] = d_input[ frame_idx * o_frsize +
o_y * xdim + o_x ] ;
}
t_idx += CUDA_MAX_THREADS ;
}
}
/*
h_expand_frame: expand the current frames in a block
this is supposed to be the first step after pix are copied into
the device memory d_input
d_input : device address of input
should have size ( xdim * ydim * num_of_frames ) ;
d_output: device address of output
should have size ( xdim + xadd * 2 ) * ( ydim + yadd * 2 ) *
( num_of_frames + zadd )
xdim : frame H size
ydim : frame V size
xadd : size of H pix added on each side
yadd : size of V pix added on each side
zadd : size of T pix added at the end , content value is 0
num_of_frames : with data in d_input
*/
void
h_expand_frame ( int *d_input, int *d_output, int xdim, int ydim,
int xadd, int yadd, int zadd, int num_of_frames )
{
int o_framesize, n_framesize, n ;
int nThreadsPerBlock = 512;
int nBlocks ;
#ifdef CUDA_DBG1
fprintf( stderr, "%s: d_input %p dout %p x/y %d %d add x/y/z %d %d %d z %d\n",
__func__, d_input, d_output, xdim, ydim, xadd, yadd, zadd,
num_of_frames ) ;
#endif
o_framesize = xdim * ydim ;
n_framesize = ( xdim + xadd * 2 ) * ( ydim + yadd * 2 ) ;
n = n_framesize * ( num_of_frames + zadd ) ;
#ifdef CUDA_DBG
fprintf( stderr, "%s: old %d new %d n %d \n",
__func__, o_framesize, n_framesize, n ) ;
#endif
h_block_adj ( n, nThreadsPerBlock, &nBlocks ) ;
// nBlocks = ( n + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
d_expand_frame <<< nBlocks, nThreadsPerBlock >>>
( d_input, d_output,
xdim, ydim, xadd, yadd, zadd, num_of_frames,
o_framesize, n_framesize, n ) ;
cudaThreadSynchronize() ;
}
|
9ae0833d407131ad8052382a6aa9e935931db87f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/clat2z.cu, mixed zc -> ds, Sun Nov 20 20:20:28 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
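/*
Worked example of the decomposition (illustrative numbers): for n = 100 with BLK_X = 64 and
BLK_Y = 32, the grid is ceil(100/64) x ceil(100/32) = 2 x 4 blocks of 64 threads; each thread
walks one row of a 64 x 32 tile. In slat2d_lower, tiles lying entirely above the diagonal
return immediately, full tiles below it take the unrolled branch, and tiles crossing the
diagonal or the matrix edge take the guarded partial branch.
*/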
__global__
void slat2d_lower(
int n,
const float *SA, int ldsa,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < n && ind + BLK_X > iby ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ),
MAGMA_S_IMAG( SA[j*ldsa] ) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ),
MAGMA_S_IMAG( SA[j*ldsa] ) );
}
}
}
}
/*
Similar to slat2d_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
__global__
void slat2d_upper(
int n,
const float *SA, int ldsa,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < n && ind < iby + BLK_Y ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ),
MAGMA_S_IMAG( SA[j*ldsa] ) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ),
MAGMA_S_IMAG( SA[j*ldsa] ) );
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
SLAT2D converts a single-real matrix, SA,
to a double-real matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix A to be converted.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
SA SINGLE PRECISION array, dimension (LDSA,n)
On entry, the n-by-n coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,n).
@param[out]
A DOUBLE PRECISION array, dimension (LDA,n)
On exit, the n-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,n).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lat2
*******************************************************************************/
extern "C" void
magmablas_slat2d(
magma_uplo_t uplo, magma_int_t n,
magmaFloat_const_ptr SA, magma_int_t ldsa,
magmaDouble_ptr A, magma_int_t lda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,n) )
*info = -4;
else if ( ldsa < max(1,n) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
if (uplo == MagmaLower) {
hipLaunchKernelGGL(( slat2d_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, SA, ldsa, A, lda);
}
else if (uplo == MagmaUpper) {
hipLaunchKernelGGL(( slat2d_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, SA, ldsa, A, lda);
}
}
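/*
Rough usage sketch (illustrative only; dSA, dA, n, ldda and queue are hypothetical names,
not part of this file): convert the lower triangle of an n-by-n single-precision device
matrix dSA into the double-precision matrix dA.
magma_int_t info;
magmablas_slat2d( MagmaLower, n, dSA, ldda, dA, ldda, queue, &info );
// info < 0 reports an illegal argument; no overflow can occur in this direction.
*/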
|
9ae0833d407131ad8052382a6aa9e935931db87f.cu
|
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/clat2z.cu, mixed zc -> ds, Sun Nov 20 20:20:28 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
__global__
void slat2d_lower(
int n,
const float *SA, int ldsa,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < n && ind + BLK_X > iby ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ),
MAGMA_S_IMAG( SA[j*ldsa] ) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ),
MAGMA_S_IMAG( SA[j*ldsa] ) );
}
}
}
}
/*
Similar to slat2d_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
__global__
void slat2d_upper(
int n,
const float *SA, int ldsa,
double *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < n && ind < iby + BLK_Y ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ),
MAGMA_S_IMAG( SA[j*ldsa] ) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
A[j*lda] = MAGMA_D_MAKE( MAGMA_S_REAL( SA[j*ldsa] ),
MAGMA_S_IMAG( SA[j*ldsa] ) );
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
SLAT2D converts a single-real matrix, SA,
to a double-real matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix A to be converted.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
SA SINGLE PRECISION array, dimension (LDSA,n)
On entry, the n-by-n coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,n).
@param[out]
A DOUBLE PRECISION array, dimension (LDA,n)
On exit, the n-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,n).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lat2
*******************************************************************************/
extern "C" void
magmablas_slat2d(
magma_uplo_t uplo, magma_int_t n,
magmaFloat_const_ptr SA, magma_int_t ldsa,
magmaDouble_ptr A, magma_int_t lda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,n) )
*info = -4;
else if ( ldsa < max(1,n) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
if (uplo == MagmaLower) {
slat2d_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda);
}
else if (uplo == MagmaUpper) {
slat2d_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda);
}
}
|
20b2d94eae78a9f9ea07bfd7a3b238d4af8b4fb1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/center_inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Compute_distance_data_gpu(int nthreads, const int K, const Dtype* bottom,
const Dtype* label, const Dtype* center, Dtype* distance) {
CUDA_KERNEL_LOOP(index, nthreads) {
int m = index / K;
int k = index % K;
const int label_value = static_cast<int>(label[m]);
// distance(i) = x(i) - c_{y(i)}
distance[index] = bottom[index] - center[label_value * K + k];
}
}
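// Compute_center_diff_gpu below assigns one thread per class center c_j: it sums
// (c_j - x_i) over the mini-batch samples labelled j and divides by (count_j + 1),
// the usual center-loss update term, with the +1 keeping centers of classes absent
// from the batch at zero gradient instead of dividing by zero.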
template <typename Dtype>
__global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K,
const Dtype* label, const Dtype* distance, Dtype* variation_sum,
Dtype* center_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int count = 0;
for (int m = 0; m < M; m++) {
const int label_value = static_cast<int>(label[m]);
if (label_value == index) {
count++;
for (int k = 0; k < K; k++) {
variation_sum[index * K + k] -= distance[m * K + k];
}
}
}
for (int k = 0; k < K; k++) {
center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.);
}
}
}
template <typename Dtype>
void CenterInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int nthreads = M_ * K_;
hipLaunchKernelGGL(( Compute_distance_data_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data());
Dtype dot;
caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot);
Dtype loss = dot / M_ / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
// top[1]
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* inner_product_top_data = top[1]->mutable_cpu_data();
const Dtype* weight = this->blobs_[0]->cpu_data();
caffe_cpu_gemm<Dtype>(CblasNoTrans, transpose_ ? CblasNoTrans : CblasTrans,
M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., inner_product_top_data);
}
template <typename Dtype>
void CenterInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int nthreads = N_;
caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_cpu_data());
hipLaunchKernelGGL(( Compute_center_diff_gpu<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(),
variation_sum_.mutable_cpu_data(), this->blobs_[0]->mutable_gpu_diff());
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->cpu_diff();
// Gradient with respect to bottom data
if (transpose_) {
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
M_, K_, N_,
(Dtype)1., top_diff, this->blobs_[0]->cpu_data(),
(Dtype)0., bottom[0]->mutable_cpu_diff());
} else {
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
M_, K_, N_,
(Dtype)1., top_diff, this->blobs_[0]->cpu_data(),
(Dtype)0., bottom[0]->mutable_cpu_diff());
}
}
/*
if (propagate_down[0]) {
caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_,
distance_.gpu_data(), bottom[0]->mutable_gpu_diff());
}
*/
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CenterInnerProductLayer);
} // namespace caffe
|
20b2d94eae78a9f9ea07bfd7a3b238d4af8b4fb1.cu
|
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/center_inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Compute_distance_data_gpu(int nthreads, const int K, const Dtype* bottom,
const Dtype* label, const Dtype* center, Dtype* distance) {
CUDA_KERNEL_LOOP(index, nthreads) {
int m = index / K;
int k = index % K;
const int label_value = static_cast<int>(label[m]);
// distance(i) = x(i) - c_{y(i)}
distance[index] = bottom[index] - center[label_value * K + k];
}
}
template <typename Dtype>
__global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K,
const Dtype* label, const Dtype* distance, Dtype* variation_sum,
Dtype* center_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int count = 0;
for (int m = 0; m < M; m++) {
const int label_value = static_cast<int>(label[m]);
if (label_value == index) {
count++;
for (int k = 0; k < K; k++) {
variation_sum[index * K + k] -= distance[m * K + k];
}
}
}
for (int k = 0; k < K; k++) {
center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.);
}
}
}
template <typename Dtype>
void CenterInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int nthreads = M_ * K_;
Compute_distance_data_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data());
Dtype dot;
caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot);
Dtype loss = dot / M_ / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
// top[1]
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* inner_product_top_data = top[1]->mutable_cpu_data();
const Dtype* weight = this->blobs_[0]->cpu_data();
caffe_cpu_gemm<Dtype>(CblasNoTrans, transpose_ ? CblasNoTrans : CblasTrans,
M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., inner_product_top_data);
}
template <typename Dtype>
void CenterInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
int nthreads = N_;
caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_cpu_data());
Compute_center_diff_gpu<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(),
variation_sum_.mutable_cpu_data(), this->blobs_[0]->mutable_gpu_diff());
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->cpu_diff();
// Gradient with respect to bottom data
if (transpose_) {
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
M_, K_, N_,
(Dtype)1., top_diff, this->blobs_[0]->cpu_data(),
(Dtype)0., bottom[0]->mutable_cpu_diff());
} else {
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
M_, K_, N_,
(Dtype)1., top_diff, this->blobs_[0]->cpu_data(),
(Dtype)0., bottom[0]->mutable_cpu_diff());
}
}
/*
if (propagate_down[0]) {
caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_,
distance_.gpu_data(), bottom[0]->mutable_gpu_diff());
}
*/
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CenterInnerProductLayer);
} // namespace caffe
|
4c9062d6193dd6973a698951b3b5c9fbecc82859.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* tex1d_unnormalized.cu
*
* Microdemo to illustrate how to texture using unnormalized
* texture coordinates in the range [0..Dim), not [0..1).
*
* Build with: nvcc -I ../chLib <options> tex1dfetch_unnormalized.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <chError.h>
texture<float, 1> tex;
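// With unnormalized coordinates tex1D() addresses the array in texel units: under
// hipFilterModePoint the fetch returns the texel at floor(x), while under
// hipFilterModeLinear it blends the two texels bracketing x - 0.5f, so x = 1.5f reads
// texel 1 exactly and x = 1.6f mixes texels 1 and 2; the sweep printed by TexReadout
// makes this behaviour visible.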
extern "C" __global__ void
TexReadout( float2 *out, size_t N, float base, float increment )
{
for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x;
i < N;
i += gridDim.x*blockDim.x )
{
float x = base + (float) i * increment;
out[i].x = x;
out[i].y = tex1D( tex, x );
}
}
/*
#ifdef PRINT_HEXADECIMAL_FLOATS
printf( "(%.2f, 0x%08x)\n", outHost[i].x, *(int *) (&outHost[i].y) );
#else
printf( "(%.2f, %.2f)\n", outHost[i].x, outHost[i].y );
#endif
*/
template<class T>
void
CreateAndPrintTex( T *initTex, size_t texN, size_t outN,
float base, float increment,
hipTextureFilterMode filterMode = hipFilterModePoint,
hipTextureAddressMode addressMode = hipAddressModeClamp )
{
T *texContents = 0;
hipArray *texArray = 0;
float2 *outHost = 0, *outDevice = 0;
hipError_t status;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<T>();
// use caller-provided array, if any, to initialize texture
if ( initTex ) {
texContents = initTex;
}
else {
// default is to initialize with identity elements
texContents = (T *) malloc( texN*sizeof(T) );
if ( ! texContents )
goto Error;
for ( int i = 0; i < texN; i++ ) {
texContents[i] = (T) i;
}
}
cuda(MallocArray(&texArray, &channelDesc, texN));
cuda(HostAlloc( (void **) &outHost,
outN*sizeof(float2),
hipHostMallocMapped));
cuda(HostGetDevicePointer( (void **)
&outDevice,
outHost, 0 ));
cuda(MemcpyToArray( texArray,
0, 0,
texContents,
texN*sizeof(T),
hipMemcpyHostToDevice));
cuda(BindTextureToArray(tex, texArray));
tex.filterMode = filterMode;
tex.addressMode[0] = addressMode;
cuda(HostGetDevicePointer(&outDevice, outHost, 0));
hipLaunchKernelGGL(( TexReadout), dim3(2),dim3(384), 0, 0, outDevice, outN, base, increment );
cuda(ThreadSynchronize());
for ( int i = 0; i < outN; i++ ) {
printf( "(%.2f, %.2f)\n", outHost[i].x, outHost[i].y );
}
printf( "\n" );
Error:
if ( ! initTex ) free( texContents );
if ( texArray ) hipFreeArray( texArray );
if ( outHost ) hipHostFree( outHost );
}
int
main( int argc, char *argv[] )
{
int ret = 1;
hipError_t status;
cuda(SetDeviceFlags(hipDeviceMapHost));
//CreateAndPrintTex<float>( NULL, 8, 8, 0.0f, 1.0f, hipFilterModePoint );
//CreateAndPrintTex<float>( NULL, 8, 8, 0.0f, 1.0f, hipFilterModeLinear );
// CreateAndPrintTex<float>( NULL, 8, 20, 0.9f, 0.01f, hipFilterModePoint );
{
float texData[10];
for ( int i = 0; i < 10; i++ ) {
texData[i] = (float) i / 10.0f;
}
// CreateAndPrintTex<float>( texData, 10, 10, 0.0f, 1.0f, hipFilterModePoint );
CreateAndPrintTex<float>( texData, 10, 10, 1.5f, 0.1f, hipFilterModeLinear );
}
ret = 0;
Error:
return ret;
}
|
4c9062d6193dd6973a698951b3b5c9fbecc82859.cu
|
/*
*
* tex1d_unnormalized.cu
*
* Microdemo to illustrate how to texture using unnormalized
* texture coordinates in the range [0..Dim), not [0..1).
*
* Build with: nvcc -I ../chLib <options> tex1dfetch_unnormalized.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <chError.h>
texture<float, 1> tex;
extern "C" __global__ void
TexReadout( float2 *out, size_t N, float base, float increment )
{
for ( size_t i = blockIdx.x*blockDim.x + threadIdx.x;
i < N;
i += gridDim.x*blockDim.x )
{
float x = base + (float) i * increment;
out[i].x = x;
out[i].y = tex1D( tex, x );
}
}
/*
#ifdef PRINT_HEXADECIMAL_FLOATS
printf( "(%.2f, 0x%08x)\n", outHost[i].x, *(int *) (&outHost[i].y) );
#else
printf( "(%.2f, %.2f)\n", outHost[i].x, outHost[i].y );
#endif
*/
template<class T>
void
CreateAndPrintTex( T *initTex, size_t texN, size_t outN,
float base, float increment,
cudaTextureFilterMode filterMode = cudaFilterModePoint,
cudaTextureAddressMode addressMode = cudaAddressModeClamp )
{
T *texContents = 0;
cudaArray *texArray = 0;
float2 *outHost = 0, *outDevice = 0;
cudaError_t status;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<T>();
// use caller-provided array, if any, to initialize texture
if ( initTex ) {
texContents = initTex;
}
else {
// default is to initialize with identity elements
texContents = (T *) malloc( texN*sizeof(T) );
if ( ! texContents )
goto Error;
for ( int i = 0; i < texN; i++ ) {
texContents[i] = (T) i;
}
}
cuda(MallocArray(&texArray, &channelDesc, texN));
cuda(HostAlloc( (void **) &outHost,
outN*sizeof(float2),
cudaHostAllocMapped));
cuda(HostGetDevicePointer( (void **)
&outDevice,
outHost, 0 ));
cuda(MemcpyToArray( texArray,
0, 0,
texContents,
texN*sizeof(T),
cudaMemcpyHostToDevice));
cuda(BindTextureToArray(tex, texArray));
tex.filterMode = filterMode;
tex.addressMode[0] = addressMode;
cuda(HostGetDevicePointer(&outDevice, outHost, 0));
TexReadout<<<2,384>>>( outDevice, outN, base, increment );
cuda(ThreadSynchronize());
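// outHost was allocated with cudaHostAllocMapped, so the kernel wrote straight
// into this host buffer through outDevice (zero-copy); the synchronize above
// makes those device writes visible to the host before they are printed.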
for ( int i = 0; i < outN; i++ ) {
printf( "(%.2f, %.2f)\n", outHost[i].x, outHost[i].y );
}
printf( "\n" );
Error:
if ( ! initTex ) free( texContents );
if ( texArray ) cudaFreeArray( texArray );
if ( outHost ) cudaFreeHost( outHost );
}
int
main( int argc, char *argv[] )
{
int ret = 1;
cudaError_t status;
cuda(SetDeviceFlags(cudaDeviceMapHost));
//CreateAndPrintTex<float>( NULL, 8, 8, 0.0f, 1.0f, cudaFilterModePoint );
//CreateAndPrintTex<float>( NULL, 8, 8, 0.0f, 1.0f, cudaFilterModeLinear );
// CreateAndPrintTex<float>( NULL, 8, 20, 0.9f, 0.01f, cudaFilterModePoint );
{
float texData[10];
for ( int i = 0; i < 10; i++ ) {
texData[i] = (float) i / 10.0f;
}
// CreateAndPrintTex<float>( texData, 10, 10, 0.0f, 1.0f, cudaFilterModePoint );
CreateAndPrintTex<float>( texData, 10, 10, 1.5f, 0.1f, cudaFilterModeLinear );
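// (Hypothetical extra call, not part of the original demo: sampling past both
// ends of the texture would show cudaAddressModeClamp pinning reads to the edge texels.)
// CreateAndPrintTex<float>( texData, 10, 14, -2.0f, 1.0f, cudaFilterModePoint, cudaAddressModeClamp );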
}
ret = 0;
Error:
return ret;
}
|
84e54df68f83c923169d2d7a03a86a0dd303a012.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "../common.h"
// TODO shared memory binning (advanced)
__global__
static void hist(int nbins, int *dbins, int nvals, const int *dvals)
{
extern __shared__ int sbins[];
for (int i = threadIdx.x; i < nbins; i += blockDim.x)
sbins[i] = 0;
__syncthreads();
int x = blockDim.x * blockIdx.x + threadIdx.x;
while (x < nvals) {
int v = dvals[x];
int bin = v % nbins;
atomicAdd(sbins + bin, 1);
x += gridDim.x * blockDim.x;
}
__syncthreads();
for (int i = threadIdx.x; i < nbins; i += blockDim.x)
atomicAdd(dbins + i, sbins[i]);
}
int main()
{
// number of (random) input values
int nvals = 4 << 20;
int nbins = 256;
size_t vbytes = nvals * sizeof(int);
size_t bbytes = nbins * sizeof(int);
// initialize random data on cpu and place in appropriate bin
// TODO (optional) move this random data creation into a kernel (see example day1/pi)
int *h_vals = (int *)malloc(vbytes);
int *h_bins_true = (int *)malloc(bbytes); // correct bins
memset(h_bins_true, 0, bbytes);
for (int i = 0; i < nvals; ++i) {
int val = 200 * nbins * float(rand()) / RAND_MAX;
int bin = val % nbins;
h_vals[i] = val;
h_bins_true[bin]++; // check against these later
// printf("val %d bin %d\n", h_vals[i], bin);
}
printf("elements %u\n", nvals);
// allocate gpu memory and transmit input values
int *d_vals, *d_bins;
CUDA(hipMalloc(&d_vals, vbytes));
CUDA(hipMalloc(&d_bins, bbytes));
CUDA(hipMemcpy(d_vals, h_vals, vbytes, hipMemcpyHostToDevice));
// create events
hipEvent_t start, stop;
CUDA(hipEventCreate(&start));
CUDA(hipEventCreate(&stop));
// compute histogram
int threads = 256;
int blocks = 100;
CUDA(hipEventRecord(start, 0));
CUDA(hipMemset(d_bins, 0, bbytes)); // zero bins .. consider part of timing?
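// The third launch parameter (nbins*sizeof(int)) is the dynamic shared-memory
// size per block backing the `extern __shared__ int sbins[]` inside the kernel.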
hipLaunchKernelGGL(( hist), dim3(blocks),dim3(threads),nbins*sizeof(int), 0, nbins, d_bins, nvals, d_vals);
CUDA(hipGetLastError());
CUDA(hipEventRecord(stop, 0));
// time kernel
CUDA(hipEventSynchronize(stop));
float time_ms = 0;
CUDA(hipEventElapsedTime(&time_ms, start, stop));
printf("time %.3f ms\n", time_ms);
// verify CPU match
int *h_bins = (int *)malloc(bbytes);
CUDA(hipMemcpy(h_bins, d_bins, bbytes, hipMemcpyDeviceToHost));
for (int i = 0; i < nbins; ++i) {
if (h_bins[i] != h_bins_true[i]) {
printf("error: invalid bin: cpu[%d]=%d gpu[%d]=%d\n",
i, h_bins_true[i], i, h_bins[i]);
break;
}
}
return 0;
}
|
84e54df68f83c923169d2d7a03a86a0dd303a012.cu
|
#include <stdio.h>
#include "../common.h"
// TODO shared memory binning (advanced)
__global__
static void hist(int nbins, int *dbins, int nvals, const int *dvals)
{
extern __shared__ int sbins[];
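// Privatized histogram: each block accumulates into shared-memory bins with
// cheap shared atomics, then merges them into the global histogram once at the
// end, greatly reducing contention on global atomicAdd.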
for (int i = threadIdx.x; i < nbins; i += blockDim.x)
sbins[i] = 0;
__syncthreads();
int x = blockDim.x * blockIdx.x + threadIdx.x;
while (x < nvals) {
int v = dvals[x];
int bin = v % nbins;
atomicAdd(sbins + bin, 1);
x += gridDim.x * blockDim.x;
}
__syncthreads();
for (int i = threadIdx.x; i < nbins; i += blockDim.x)
atomicAdd(dbins + i, sbins[i]);
}
int main()
{
// number of (random) input values
int nvals = 4 << 20;
int nbins = 256;
size_t vbytes = nvals * sizeof(int);
size_t bbytes = nbins * sizeof(int);
// initialize random data on cpu and place in appropriate bin
// TODO (optional) move this random data creation into a kernel (see example day1/pi)
int *h_vals = (int *)malloc(vbytes);
int *h_bins_true = (int *)malloc(bbytes); // correct bins
memset(h_bins_true, 0, bbytes);
for (int i = 0; i < nvals; ++i) {
int val = 200 * nbins * float(rand()) / RAND_MAX;
int bin = val % nbins;
h_vals[i] = val;
h_bins_true[bin]++; // check against these later
// printf("val %d bin %d\n", h_vals[i], bin);
}
printf("elements %u\n", nvals);
// allocate gpu memory and transmit input values
int *d_vals, *d_bins;
CUDA(cudaMalloc(&d_vals, vbytes));
CUDA(cudaMalloc(&d_bins, bbytes));
CUDA(cudaMemcpy(d_vals, h_vals, vbytes, cudaMemcpyHostToDevice));
// create events
cudaEvent_t start, stop;
CUDA(cudaEventCreate(&start));
CUDA(cudaEventCreate(&stop));
// compute histogram
int threads = 256;
int blocks = 100;
CUDA(cudaEventRecord(start, 0));
CUDA(cudaMemset(d_bins, 0, bbytes)); // zero bins .. consider part of timing?
hist<<<blocks,threads,nbins*sizeof(int)>>>(nbins, d_bins, nvals, d_vals);
CUDA(cudaGetLastError());
CUDA(cudaEventRecord(stop, 0));
// time kernel
CUDA(cudaEventSynchronize(stop));
float time_ms = 0;
CUDA(cudaEventElapsedTime(&time_ms, start, stop));
printf("time %.3f ms\n", time_ms);
// verify CPU match
int *h_bins = (int *)malloc(bbytes);
CUDA(cudaMemcpy(h_bins, d_bins, bbytes, cudaMemcpyDeviceToHost));
for (int i = 0; i < nbins; ++i) {
if (h_bins[i] != h_bins_true[i]) {
printf("error: invalid bin: cpu[%d]=%d gpu[%d]=%d\n",
i, h_bins_true[i], i, h_bins[i]);
break;
}
}
return 0;
}
|
1afaeceae278fee5286d7f6083145f80e21c2413.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaDefs.h>
#include <rocblas.h>
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
hipblasStatus_t status = hipblasStatus_t();
hipblasHandle_t handle = hipblasHandle_t();
const unsigned int N = 5;
const unsigned int dim = 3;
const unsigned int MEMSIZE = N * dim * sizeof(float);
const unsigned int THREAD_PER_BLOCK = 128;
const unsigned int GRID_SIZE = (N * dim + THREAD_PER_BLOCK - 1)/THREAD_PER_BLOCK;
void fillData(float *data, const unsigned int length, const unsigned int dim) {
unsigned int id = 0;
for (unsigned int i=0; i<length; i++) {
for (unsigned int j=0; j<dim; j++) {
data[id++]= i & 255; // =i%256
}
}
}
void fillDataWithNumber(float *data, const unsigned int length, const unsigned int dim, const float number) {
unsigned int id = 0;
for (unsigned int i=0; i<length; i++) {
for (unsigned int j=0; j<dim; j++) {
data[id++]= number;
}
}
}
__global__ void kernelPowerTwo(const float * __restrict__ a, const float * __restrict__ b, const unsigned int length, float * __restrict__ a2, float * __restrict__ b2) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int jump = gridDim.x * blockDim.x;
while (idx < length) {
float tmp = a[idx];
a2[idx] = tmp * tmp; // a2 = element-wise square of a
tmp = b[idx];
b2[idx] = tmp * tmp; // b2 = element-wise square of b
idx += jump;
}
}
int main(int argc, char *argv[]) {
initializeCUDA(deviceProp);
status = hipblasCreate(&handle) ;
float alpha, beta;
float *a, *b, *m;
float *da, *da2, *db, *db2, *dm;
float *ones, *dones;
// page-locked allocation
hipHostMalloc((void**)&a, MEMSIZE,hipHostMallocDefault);
hipHostMalloc((void**)&b, MEMSIZE,hipHostMallocDefault);
hipHostMalloc((void**)&ones, MEMSIZE,hipHostMallocDefault);
hipHostMalloc((void**)&m, N * N * sizeof(float),hipHostMallocDefault);
hipMalloc( (void**)&da, MEMSIZE );
hipMalloc( (void**)&da2, MEMSIZE );
hipMalloc( (void**)&db, MEMSIZE );
hipMalloc( (void**)&db2, MEMSIZE );
hipMalloc( (void**)&dones, MEMSIZE );
hipMalloc( (void**)&dm, N * N * sizeof(float));
fillData(a, N, dim);
fillData(b, N, dim);
fillDataWithNumber(ones, N, dim, 1.0f);
//Copy data to DEVICE
hipMemcpy(da, a, MEMSIZE, hipMemcpyHostToDevice);
hipMemcpy(db, b, MEMSIZE, hipMemcpyHostToDevice);
hipMemcpy(dones, ones, MEMSIZE, hipMemcpyHostToDevice);
//Process a -> a^2 and b->b^2
hipLaunchKernelGGL(( kernelPowerTwo), dim3(GRID_SIZE), dim3(THREAD_PER_BLOCK), 0, 0, da, db, N * dim, da2, db2);
// TODO 2: Process a^2 + b^2 using CUBLAS //pair-wise operation such that the result is dm[N*N] matrix
// T -> transpose the operand
// N -> leave the operand as is (no transpose)
// N, N, dim -> the dimensions of the matrices involved
alpha = 1.0f;
beta = 0.0f;
hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, N, N, dim, &alpha, da2, dim, dones, dim, &beta, dm, N);
checkDeviceMatrix<float>(dm, sizeof(float)*N, N, N, "%f ", "M");
//alpha = 1.0f;
beta = 1.0f;
hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, N, N, dim, &alpha, dones, dim, db2, dim, &beta, dm, N);
//TODO 3: Process -2ab and sum with previous result stored in dm using CUBLAS
alpha = -2.0f;
//beta = 1.0f;
hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, N, N, dim, &alpha, da, dim, db, dim, &beta, dm, N);
checkDeviceMatrix<float>(da, sizeof(float)*dim, N, dim, "%f ", "A");
checkDeviceMatrix<float>(da2, sizeof(float)*dim, N, dim, "%f ", "A^2");
checkDeviceMatrix<float>(db, sizeof(float)*dim, N, dim, "%f ", "B");
checkDeviceMatrix<float>(db2, sizeof(float)*dim, N, dim, "%f ", "B^2");
checkDeviceMatrix<float>(dones, sizeof(float)*dim, N, dim, "%f ", "ONES");
checkDeviceMatrix<float>(dm, sizeof(float)*N, N, N, "%f ", "M");
hipFree(da);
hipFree(da2);
hipFree(db);
hipFree(db2);
hipFree(dm);
hipFree(dones);
hipHostFree(a);
hipHostFree(b);
hipHostFree(m);
hipHostFree(ones);
status = hipblasDestroy(handle);
}
|
1afaeceae278fee5286d7f6083145f80e21c2413.cu
|
#include <cudaDefs.h>
#include <cublas_v2.h>
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
cublasStatus_t status = cublasStatus_t();
cublasHandle_t handle = cublasHandle_t();
const unsigned int N = 5;
const unsigned int dim = 3;
const unsigned int MEMSIZE = N * dim * sizeof(float);
const unsigned int THREAD_PER_BLOCK = 128;
const unsigned int GRID_SIZE = (N * dim + THREAD_PER_BLOCK - 1)/THREAD_PER_BLOCK;
void fillData(float *data, const unsigned int length, const unsigned int dim) {
unsigned int id = 0;
for (unsigned int i=0; i<length; i++) {
for (unsigned int j=0; j<dim; j++) {
data[id++]= i & 255; // =i%256
}
}
}
void fillDataWithNumber(float *data, const unsigned int length, const unsigned int dim, const float number) {
unsigned int id = 0;
for (unsigned int i=0; i<length; i++) {
for (unsigned int j=0; j<dim; j++) {
data[id++]= number;
}
}
}
__global__ void kernelPowerTwo(const float * __restrict__ a, const float * __restrict__ b, const unsigned int length, float * __restrict__ a2, float * __restrict__ b2) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int jump = gridDim.x * blockDim.x;
while (idx < length) {
float tmp = a[idx];
a2[idx] = tmp * tmp; // a2 = element-wise square of a
tmp = b[idx];
b2[idx] = tmp * tmp; // b2 = element-wise square of b
idx += jump;
}
}
int main(int argc, char *argv[]) {
initializeCUDA(deviceProp);
status = cublasCreate(&handle) ;
float alpha, beta;
float *a, *b, *m;
float *da, *da2, *db, *db2, *dm;
float *ones, *dones;
// page-locked allocation
cudaHostAlloc((void**)&a, MEMSIZE,cudaHostAllocDefault);
cudaHostAlloc((void**)&b, MEMSIZE,cudaHostAllocDefault);
cudaHostAlloc((void**)&ones, MEMSIZE,cudaHostAllocDefault);
cudaHostAlloc((void**)&m, N * N * sizeof(float),cudaHostAllocDefault);
cudaMalloc( (void**)&da, MEMSIZE );
cudaMalloc( (void**)&da2, MEMSIZE );
cudaMalloc( (void**)&db, MEMSIZE );
cudaMalloc( (void**)&db2, MEMSIZE );
cudaMalloc( (void**)&dones, MEMSIZE );
cudaMalloc( (void**)&dm, N * N * sizeof(float));
fillData(a, N, dim);
fillData(b, N, dim);
fillDataWithNumber(ones, N, dim, 1.0f);
//Copy data to DEVICE
cudaMemcpy(da, a, MEMSIZE, cudaMemcpyHostToDevice);
cudaMemcpy(db, b, MEMSIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dones, ones, MEMSIZE, cudaMemcpyHostToDevice);
//Process a -> a^2 and b->b^2
kernelPowerTwo<<<GRID_SIZE, THREAD_PER_BLOCK>>>(da, db, N * dim, da2, db2);
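// The three SGEMM calls below assemble pairwise squared Euclidean distances:
// dm(i,j) = |a_i|^2 + |b_j|^2 - 2 * (a_i . b_j), with the all-ones matrix used
// to broadcast the row sums of the squared entries across dm.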
// TODO 2: Process a^2 + b^2 using CUBLAS //pair-wise operation such that the result is dm[N*N] matrix
// T -> transpose the operand
// N -> leave the operand as is (no transpose)
// N, N, dim -> the dimensions of the matrices involved
alpha = 1.0f;
beta = 0.0f;
cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, N, dim, &alpha, da2, dim, dones, dim, &beta, dm, N);
checkDeviceMatrix<float>(dm, sizeof(float)*N, N, N, "%f ", "M");
//alpha = 1.0f;
beta = 1.0f;
cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, N, dim, &alpha, dones, dim, db2, dim, &beta, dm, N);
//TODO 3: Process -2ab and sum with previous result stored in dm using CUBLAS
alpha = -2.0f;
//beta = 1.0f;
cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, N, dim, &alpha, da, dim, db, dim, &beta, dm, N);
checkDeviceMatrix<float>(da, sizeof(float)*dim, N, dim, "%f ", "A");
checkDeviceMatrix<float>(da2, sizeof(float)*dim, N, dim, "%f ", "A^2");
checkDeviceMatrix<float>(db, sizeof(float)*dim, N, dim, "%f ", "B");
checkDeviceMatrix<float>(db2, sizeof(float)*dim, N, dim, "%f ", "B^2");
checkDeviceMatrix<float>(dones, sizeof(float)*dim, N, dim, "%f ", "ONES");
checkDeviceMatrix<float>(dm, sizeof(float)*N, N, N, "%f ", "M");
cudaFree(da);
cudaFree(da2);
cudaFree(db);
cudaFree(db2);
cudaFree(dm);
cudaFree(dones);
cudaFreeHost(a);
cudaFreeHost(b);
cudaFreeHost(m);
cudaFreeHost(ones);
status = cublasDestroy(handle);
}
|
b63b6c90cbc77fad0dd593edf1656917dda6f6bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuozblas_common.h"
// =========================================
// Split
// Based on K.Ozaki et al., "Error-free transformations of matrix multiplication
// by using fast routines of matrix multiplication and its applications", 2012
// =========================================
// Number of threads and blocks for CUDA kernels
#define SPLIT_N_NTX 32
#define SPLIT_N_NTY 16
#define SPLIT_T_NTX 512
#define SPLIT_T_NTY 1
#define SPLIT_VEC_NTX 512
#define SPLIT_VEC_NBX 512
#define CONST 0.75 // for splitting
template <typename TYPE>
__device__
TYPE NextPowTwo (const TYPE p) {
constexpr int32_t epse_type = getEpse <TYPE> ();
return scalbn (p, epse_type) - (TYPE)((scalbn (1., epse_type) - 1) * p);
}
template <typename TYPE1, typename TYPE2>
__host__ __device__
int32_t getRho (const int32_t dim, const int32_t overflowMode) {
constexpr int32_t epse_type1 = getEpse <TYPE1> ();
constexpr int32_t epse_type2 = getEpse <TYPE2> ();
if (overflowMode)
return ceil((epse_type1-(epse_type2-log2(2.*sqrt(dim)))/2)); // overflow-ver
else
return ceil((epse_type1-(epse_type2-log2(1.*dim))/2)); // standard-ver
}
// =========================================
template <typename TYPE>
__global__
void cuozblasFindMaxRKernel (
const int32_t m,
const int32_t n,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devMax
) {
const int32_t iBx = blockIdx.x;
const int32_t nTx = SPLIT_N_NTX;//blockDim.x;
const int32_t nTy = SPLIT_N_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
TYPE max, input, tmp;
__shared__ TYPE shm[nTx*nTy];
int32_t j;
if (addrx < m){
max = 0.;
for (j = iTy; j < n; j+=nTy) {
input = devInput[j * ldi + addrx];
if (max < fabs(input)) max = fabs(input);
}
shm[nTx * iTy + iTx] = max;
__syncthreads ();
if (iTy == 0) {
max = shm[iTx];
#pragma unroll
for (j = 1; j < SPLIT_N_NTY; j++) {
tmp = shm[nTx * j + iTx];
if (max < fabs(tmp)) max = fabs(tmp);
}
devMax[addrx] = max;
}
}
}
template <typename TYPE>
__global__
void cuozblasFindMaxCKernel (
const int32_t m,
const int32_t n,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devMax
) {
const int32_t iBx = blockIdx.x;
const int32_t iBy = blockIdx.y;
const int32_t nTx = SPLIT_T_NTX;//blockDim.x;
const int32_t nTy = SPLIT_T_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
const int32_t addry = iBy * nTy + iTy;
TYPE max, input, tmp;
__shared__ TYPE shm[nTx];
if (addry < n){
max = 0.;
for (int32_t i = addrx; i < m; i += nTx) {
input = devInput[addry * ldi + i];
if (max < fabs(input)) max = fabs(input);
}
shm[iTx] = max;
__syncthreads ();
#pragma unroll
for (int32_t i = nTx/2; i > 0; i>>=1) {
tmp = shm[iTx+i];
if (iTx < i && shm[iTx] < tmp) shm[iTx] = tmp;
__syncthreads ();
}
if (iTx == 0) devMax[addry] = shm[0];
}
}
template <typename TYPE>
__host__
void cuozblasFindMaxDevice (
const char major,
const int32_t m,
const int32_t n,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devMax
) {
int32_t ntx, nty, nbx, nby;
dim3 threads, grid;
if (major == 'r') {
ntx = SPLIT_N_NTX;
nty = SPLIT_N_NTY;
nbx = ceil (float(m) / ntx);
nby = 1;
threads = dim3 (ntx, nty);
grid = dim3 (nbx, nby);
hipLaunchKernelGGL(( cuozblasFindMaxRKernel) , dim3(grid), dim3(threads) , 0, 0, m, n, devInput, ldi, devMax);
} else {
ntx = SPLIT_T_NTX;
nty = SPLIT_T_NTY;
nbx = 1;
nby = ceil (float(n) / nty);
threads = dim3 (ntx, nty);
grid = dim3 (nbx, nby);
hipLaunchKernelGGL(( cuozblasFindMaxCKernel) , dim3(grid), dim3(threads) , 0, 0, m, n, devInput, ldi, devMax);
}
}
// max for n
template <typename TYPE>
__global__
void cuozblasSplitRKernel (
const int32_t m,
const int32_t n,
const int32_t rho,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devOutput,
const int32_t ldo,
TYPE *devSplit,
const int32_t lds,
short *devSpExp,
TYPE *devMax,
const int32_t splitShift
) {
const int32_t iBx = blockIdx.x;
const int32_t nTx = SPLIT_N_NTX;//blockDim.x;
const int32_t nTy = SPLIT_N_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
__shared__ double shm[nTx*nTy];
if (addrx < m){
const TYPE sigma = CONST * scalbn (1., rho) * NextPowTwo <TYPE> (devMax[addrx]) / splitShift;
TYPE max_ = 0.;
for (int32_t j = iTy; j < n; j += nTy) {
TYPE input = devInput[j * ldi + addrx];
const TYPE split = SUB (ADD (input, sigma), sigma);
input = SUB (input, split);
devSplit[j * lds + addrx] = split;
devOutput[j * ldo + addrx] = input;
max_ = MAX(max_, fabs(input));
}
shm[nTx * iTy + iTx] = max_;
__syncthreads ();
if (iTy == 0) {
max_ = shm[iTx];
#pragma unroll
for (int32_t j = 1; j < nTy; j++)
max_ = MAX(max_, fabs(shm[nTx * j + iTx]));
devMax[addrx] = max_;
}
}
}
template <typename TYPE1, typename TYPE2>
__global__
void cuozblasSplitRKernel (
const int32_t m,
const int32_t n,
const int32_t rho,
const TYPE1 * __restrict__ devInput,
const int32_t ldi,
TYPE1 *devOutput,
const int32_t ldo,
TYPE2 *devSplit,
const int32_t lds,
short *devSpExp,
TYPE1 *devMax,
const int32_t splitShift
) {
const int32_t iBx = blockIdx.x;
const int32_t nTx = SPLIT_N_NTX;//blockDim.x;
const int32_t nTy = SPLIT_N_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
__shared__ double shm[nTx*nTy];
if (addrx < m){
const short tau = devSpExp[addrx] = ceil(log2(fabs(devMax[addrx])));
const TYPE1 sigma = MUL (MUL (CONST, scalbn (1., rho + tau)), splitShift);
TYPE1 max_ = 0.;
for (int32_t j = iTy; j < n; j += nTy) {
TYPE1 input = devInput[j * ldi + addrx];
const TYPE1 split = SUB (ADD (input, sigma), sigma);
input = SUB (input, split);
devSplit[j * lds + addrx] = scalbn(split, -tau);
devOutput[j * ldo + addrx] = input;
max_ = MAX(max_, fabs(input));
}
shm[nTx * iTy + iTx] = max_;
__syncthreads ();
if (iTy == 0) {
max_ = shm[iTx];
#pragma unroll
for (int32_t j = 1; j < nTy; j++)
max_ = MAX(max_, fabs(shm[nTx * j + iTx]));
devMax[addrx] = max_;
}
}
}
// max for m
template <typename TYPE>
__global__
void cuozblasSplitCKernel (
const int32_t m,
const int32_t n,
const int32_t rho,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devOutput,
const int32_t ldo,
TYPE *devSplit,
const int32_t lds,
short *devSpExp,
TYPE *devMax,
const int32_t splitShift
) {
const int32_t iBx = blockIdx.x;
const int32_t iBy = blockIdx.y;
const int32_t nTx = SPLIT_T_NTX;//blockDim.x;
const int32_t nTy = SPLIT_T_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
const int32_t addry = iBy * nTy + iTy;
__shared__ double shm[nTx];
if (addry < n){
const TYPE sigma = CONST * scalbn (1., rho) * NextPowTwo <TYPE> (devMax[addry]) / splitShift;
TYPE max = 0.;
for (int32_t i = addrx; i < m; i += nTx) {
TYPE input = devInput[addry * ldi + i];
const TYPE split = SUB (ADD (input, sigma), sigma);
input = SUB (input, split);
devSplit[addry * lds + i] = split;
devOutput[addry * ldo + i] = input;
max = MAX(max, fabs(input));
}
shm[iTx] = max;
__syncthreads ();
#pragma unroll
for (int32_t i = nTx/2; i > 0; i>>=1) {
if (iTx < i && shm[iTx] < shm[iTx+i]) shm[iTx] = shm[iTx+i];
__syncthreads ();
}
if (iTx == 0) devMax[addry] = shm[0];
}
}
template <typename TYPE1, typename TYPE2>
__global__
void cuozblasSplitCKernel (
const int32_t m,
const int32_t n,
const int32_t rho,
const TYPE1 * __restrict__ devInput,
const int32_t ldi,
TYPE1 *devOutput,
const int32_t ldo,
TYPE2 *devSplit,
const int32_t lds,
short *devSpExp,
TYPE1 *devMax,
const int32_t splitShift
) {
const int32_t iBx = blockIdx.x;
const int32_t iBy = blockIdx.y;
const int32_t nTx = SPLIT_T_NTX;//blockDim.x;
const int32_t nTy = SPLIT_T_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
const int32_t addry = iBy * nTy + iTy;
__shared__ double shm[nTx];
if (addry < n){
const short tau = devSpExp[addry] = ceil(log2(fabs(devMax[addry])));
const TYPE1 sigma = MUL (MUL (CONST, scalbn (1., rho + tau)), splitShift);
TYPE1 max = 0.;
for (int32_t i = addrx; i < m; i += nTx) {
TYPE1 input = devInput[addry * ldi + i];
const TYPE1 split = SUB (ADD (input, sigma), sigma);
input = SUB (input, split);
devSplit[addry * lds + i] = scalbn(split, -tau);
devOutput[addry * ldo + i] = input;
max = MAX(max, fabs(input));
}
shm[iTx] = max;
__syncthreads ();
#pragma unroll
for (int32_t i = nTx/2; i > 0; i>>=1) {
if (iTx < i && shm[iTx] < shm[iTx+i]) shm[iTx] = shm[iTx+i];
__syncthreads ();
}
if (iTx == 0) devMax[addry] = shm[0];
}
}
template <typename TYPE1, typename TYPE2>
__host__
void cuozblasSplitDevice (
cuozblasHandle_t *oh,
const char major,
const int32_t m,
const int32_t n,
const TYPE1 *devInput, // input matrix (devAwrk)
const int32_t ldi, // leading dimension of input matrix
TYPE1 *devOutput, // output matrix (devAwrk)
const int32_t ldo,
TYPE2 *devSplit, // split matrices (output): this includes NumSplitMax matrices
const int32_t lds, // leading dimension of split matrix (# of cols)
short *devSpExp, // exponent of split matrix
TYPE1 *devMax,
int32_t overflowModeFlag,
const int32_t splitShift
) {
int32_t ntx, nty, nbx, nby;
dim3 threads, grid;
const int32_t dim = (major == 'r') ? n : m;
const int32_t rho = getRho <TYPE1, TYPE2> (dim, overflowModeFlag);
if (major == 'r') {
ntx = SPLIT_N_NTX;
nty = SPLIT_N_NTY;
nbx = ceil (float(m) / ntx);
nby = 1;
threads = dim3 (ntx, nty);
grid = dim3 (nbx, nby);
hipLaunchKernelGGL(( cuozblasSplitRKernel) , dim3(grid), dim3(threads) , 0, 0, m, n, rho, devInput, ldi, devOutput, ldo, devSplit, lds, devSpExp, devMax, splitShift);
} else {
ntx = SPLIT_T_NTX;
nty = SPLIT_T_NTY;
nbx = 1;
nby = ceil (float(n) / nty);
threads = dim3 (ntx, nty);
grid = dim3 (nbx, nby);
hipLaunchKernelGGL(( cuozblasSplitCKernel) , dim3(grid), dim3(threads) , 0, 0, m, n, rho, devInput, ldi, devOutput, ldo, devSplit, lds, devSpExp, devMax, splitShift);
}
}
template <typename TYPE1, typename TYPE2>
__host__
int32_t cuozblasSplit (
cuozblasHandle_t *oh,
const char major,
const int32_t m,
const int32_t n,
const TYPE1 *devInput,
const int32_t ldi,
TYPE1 *devOutput,
const int32_t ldo,
TYPE2 *devSplit,
const int32_t lds,
short *devSpExp,
const int32_t ldse,
TYPE1 *devMax
) {
// FindMax^(0)
if ((major == 'r' && m == 1) || (major == 'c' && n == 1)) {
int32_t ptrMax = 0;
blasRiamax (oh->ch, ((major == 'r') ? n : m), devInput, 1, &ptrMax);
hipMemcpy (devMax, &devInput[ptrMax-1], sizeof(TYPE1), hipMemcpyDeviceToDevice);
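// blasRiamax follows the 1-based BLAS indexing convention, hence the ptrMax-1
// offset when copying the maximum-magnitude element into devMax.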
} else {
cuozblasFindMaxDevice (major, m, n, devInput, ldi, devMax);
}
// Split^(0) & FindMax^(1)
cuozblasSplitDevice (oh, major, m, n, devInput, ldi, devOutput, ldo, devSplit, lds, devSpExp, devMax, oh->overflowModeFlag, oh->splitShift);
const int32_t maxS = (oh->nSplitMax > 0) ? oh->nSplitMax : NumSplitDefaultMax;
int32_t s;
for (s = 1; s < maxS; s++) {
TYPE1 check = 0.;
if ((major == 'r' && m == 1) || (major == 'c' && n == 1))
hipMemcpy (&check, devMax, sizeof(TYPE1), hipMemcpyDeviceToHost);
else
blasRasum (oh->ch, ((major == 'r') ? m : n), devMax, 1, &check);
if (check == 0.) return s;
// Split^(i) & FindMax^(i+1)
cuozblasSplitDevice (oh, major, m, n, devOutput, ldo, devOutput, ldo, &devSplit[lds*n*s], lds, &devSpExp[ldse*s], devMax, oh->overflowModeFlag, oh->splitShift);
}
if (oh->splitModeFlag > 0)
fprintf (OUTPUT, "OzBLAS error: infSplit failed.\n");
return s;
}
template int32_t cuozblasSplit <double, double> (cuozblasHandle_t *oh, const char major, const int32_t m, const int32_t n, const double *devInput, const int32_t ldi, double *devOutput, const int32_t ldo, double *devSplit, const int32_t lds, short *devSpExp, const int32_t ldse, double *devMax);
template int32_t cuozblasSplit <double, float> (cuozblasHandle_t *oh, const char major, const int32_t m, const int32_t n, const double *devInput, const int32_t ldi, double *devOutput, const int32_t ldo, float *devSplit, const int32_t lds, short *devSpExp, const int32_t ldse, double *devMax);
template int32_t cuozblasSplit <float, float> (cuozblasHandle_t *oh, const char major, const int32_t m, const int32_t n, const float *devInput, const int32_t ldi, float *devOutput, const int32_t ldo, float *devSplit, const int32_t lds, short *devSpExp, const int32_t ldse, float *devMax);
template int32_t cuozblasSplit <float, double> (cuozblasHandle_t *oh, const char major, const int32_t m, const int32_t n, const float *devInput, const int32_t ldi, float *devOutput, const int32_t ldo, double *devSplit, const int32_t lds, short *devSpExp, const int32_t ldse, float *devMax);
|
b63b6c90cbc77fad0dd593edf1656917dda6f6bb.cu
|
#include "cuozblas_common.h"
// =========================================
// Split
// Based on K.Ozaki et al., "Error-free transformations of matrix multiplication
// by using fast routines of matrix multiplication and its applications", 2012
// =========================================
// Number of threads and blocks for CUDA kernels
#define SPLIT_N_NTX 32
#define SPLIT_N_NTY 16
#define SPLIT_T_NTX 512
#define SPLIT_T_NTY 1
#define SPLIT_VEC_NTX 512
#define SPLIT_VEC_NBX 512
#define CONST 0.75 // for splitting
template <typename TYPE>
__device__
TYPE NextPowTwo (const TYPE p) {
constexpr int32_t epse_type = getEpse <TYPE> ();
return scalbn (p, epse_type) - (TYPE)((scalbn (1., epse_type) - 1) * p);
}
template <typename TYPE1, typename TYPE2>
__host__ __device__
int32_t getRho (const int32_t dim, const int32_t overflowMode) {
constexpr int32_t epse_type1 = getEpse <TYPE1> ();
constexpr int32_t epse_type2 = getEpse <TYPE2> ();
if (overflowMode)
return ceil((epse_type1-(epse_type2-log2(2.*sqrt(dim)))/2)); // overflow-ver
else
return ceil((epse_type1-(epse_type2-log2(1.*dim))/2)); // standard-ver
}
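// rho sets the split width: each extracted high part keeps few enough
// significant bits that length-`dim` inner products over the split pieces can
// be accumulated in the target precision without rounding error (Ozaki et al.).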
// =========================================
template <typename TYPE>
__global__
void cuozblasFindMaxRKernel (
const int32_t m,
const int32_t n,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devMax
) {
const int32_t iBx = blockIdx.x;
const int32_t nTx = SPLIT_N_NTX;//blockDim.x;
const int32_t nTy = SPLIT_N_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
TYPE max, input, tmp;
__shared__ TYPE shm[nTx*nTy];
int32_t j;
if (addrx < m){
max = 0.;
for (j = iTy; j < n; j+=nTy) {
input = devInput[j * ldi + addrx];
if (max < fabs(input)) max = fabs(input);
}
shm[nTx * iTy + iTx] = max;
__syncthreads ();
if (iTy == 0) {
max = shm[iTx];
#pragma unroll
for (j = 1; j < SPLIT_N_NTY; j++) {
tmp = shm[nTx * j + iTx];
if (max < fabs(tmp)) max = fabs(tmp);
}
devMax[addrx] = max;
}
}
}
template <typename TYPE>
__global__
void cuozblasFindMaxCKernel (
const int32_t m,
const int32_t n,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devMax
) {
const int32_t iBx = blockIdx.x;
const int32_t iBy = blockIdx.y;
const int32_t nTx = SPLIT_T_NTX;//blockDim.x;
const int32_t nTy = SPLIT_T_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
const int32_t addry = iBy * nTy + iTy;
TYPE max, input, tmp;
__shared__ TYPE shm[nTx];
if (addry < n){
max = 0.;
for (int32_t i = addrx; i < m; i += nTx) {
input = devInput[addry * ldi + i];
if (max < fabs(input)) max = fabs(input);
}
shm[iTx] = max;
__syncthreads ();
#pragma unroll
for (int32_t i = nTx/2; i > 0; i>>=1) {
tmp = shm[iTx+i];
if (iTx < i && shm[iTx] < tmp) shm[iTx] = tmp;
__syncthreads ();
}
if (iTx == 0) devMax[addry] = shm[0];
}
}
template <typename TYPE>
__host__
void cuozblasFindMaxDevice (
const char major,
const int32_t m,
const int32_t n,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devMax
) {
int32_t ntx, nty, nbx, nby;
dim3 threads, grid;
if (major == 'r') {
ntx = SPLIT_N_NTX;
nty = SPLIT_N_NTY;
nbx = ceil (float(m) / ntx);
nby = 1;
threads = dim3 (ntx, nty);
grid = dim3 (nbx, nby);
cuozblasFindMaxRKernel <<< grid, threads >>> (m, n, devInput, ldi, devMax);
} else {
ntx = SPLIT_T_NTX;
nty = SPLIT_T_NTY;
nbx = 1;
nby = ceil (float(n) / nty);
threads = dim3 (ntx, nty);
grid = dim3 (nbx, nby);
cuozblasFindMaxCKernel <<< grid, threads >>>(m, n, devInput, ldi, devMax);
}
}
// max for n
template <typename TYPE>
__global__
void cuozblasSplitRKernel (
const int32_t m,
const int32_t n,
const int32_t rho,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devOutput,
const int32_t ldo,
TYPE *devSplit,
const int32_t lds,
short *devSpExp,
TYPE *devMax,
const int32_t splitShift
) {
const int32_t iBx = blockIdx.x;
const int32_t nTx = SPLIT_N_NTX;//blockDim.x;
const int32_t nTy = SPLIT_N_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
__shared__ double shm[nTx*nTy];
if (addrx < m){
const TYPE sigma = CONST * scalbn (1., rho) * NextPowTwo <TYPE> (devMax[addrx]) / splitShift;
TYPE max_ = 0.;
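// split = (input + sigma) - sigma rounds away the low-order bits of input,
// leaving an exactly representable high part; the remainder (input - split)
// is written back to devOutput for the next splitting pass.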
for (int32_t j = iTy; j < n; j += nTy) {
TYPE input = devInput[j * ldi + addrx];
const TYPE split = SUB (ADD (input, sigma), sigma);
input = SUB (input, split);
devSplit[j * lds + addrx] = split;
devOutput[j * ldo + addrx] = input;
max_ = MAX(max_, fabs(input));
}
shm[nTx * iTy + iTx] = max_;
__syncthreads ();
if (iTy == 0) {
max_ = shm[iTx];
#pragma unroll
for (int32_t j = 1; j < nTy; j++)
max_ = MAX(max_, fabs(shm[nTx * j + iTx]));
devMax[addrx] = max_;
}
}
}
template <typename TYPE1, typename TYPE2>
__global__
void cuozblasSplitRKernel (
const int32_t m,
const int32_t n,
const int32_t rho,
const TYPE1 * __restrict__ devInput,
const int32_t ldi,
TYPE1 *devOutput,
const int32_t ldo,
TYPE2 *devSplit,
const int32_t lds,
short *devSpExp,
TYPE1 *devMax,
const int32_t splitShift
) {
const int32_t iBx = blockIdx.x;
const int32_t nTx = SPLIT_N_NTX;//blockDim.x;
const int32_t nTy = SPLIT_N_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
__shared__ double shm[nTx*nTy];
if (addrx < m){
const short tau = devSpExp[addrx] = ceil(log2(fabs(devMax[addrx])));
const TYPE1 sigma = MUL (MUL (CONST, scalbn (1., rho + tau)), splitShift);
TYPE1 max_ = 0.;
for (int32_t j = iTy; j < n; j += nTy) {
TYPE1 input = devInput[j * ldi + addrx];
const TYPE1 split = SUB (ADD (input, sigma), sigma);
input = SUB (input, split);
devSplit[j * lds + addrx] = scalbn(split, -tau);
devOutput[j * ldo + addrx] = input;
max_ = MAX(max_, fabs(input));
}
shm[nTx * iTy + iTx] = max_;
__syncthreads ();
if (iTy == 0) {
max_ = shm[iTx];
#pragma unroll
for (int32_t j = 1; j < nTy; j++)
max_ = MAX(max_, fabs(shm[nTx * j + iTx]));
devMax[addrx] = max_;
}
}
}
// max for m
template <typename TYPE>
__global__
void cuozblasSplitCKernel (
const int32_t m,
const int32_t n,
const int32_t rho,
const TYPE * __restrict__ devInput,
const int32_t ldi,
TYPE *devOutput,
const int32_t ldo,
TYPE *devSplit,
const int32_t lds,
short *devSpExp,
TYPE *devMax,
const int32_t splitShift
) {
const int32_t iBx = blockIdx.x;
const int32_t iBy = blockIdx.y;
const int32_t nTx = SPLIT_T_NTX;//blockDim.x;
const int32_t nTy = SPLIT_T_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
const int32_t addry = iBy * nTy + iTy;
__shared__ double shm[nTx];
if (addry < n){
const TYPE sigma = CONST * scalbn (1., rho) * NextPowTwo <TYPE> (devMax[addry]) / splitShift;
TYPE max = 0.;
for (int32_t i = addrx; i < m; i += nTx) {
TYPE input = devInput[addry * ldi + i];
const TYPE split = SUB (ADD (input, sigma), sigma);
input = SUB (input, split);
devSplit[addry * lds + i] = split;
devOutput[addry * ldo + i] = input;
max = MAX(max, fabs(input));
}
shm[iTx] = max;
__syncthreads ();
#pragma unroll
for (int32_t i = nTx/2; i > 0; i>>=1) {
if (iTx < i && shm[iTx] < shm[iTx+i]) shm[iTx] = shm[iTx+i];
__syncthreads ();
}
if (iTx == 0) devMax[addry] = shm[0];
}
}
template <typename TYPE1, typename TYPE2>
__global__
void cuozblasSplitCKernel (
const int32_t m,
const int32_t n,
const int32_t rho,
const TYPE1 * __restrict__ devInput,
const int32_t ldi,
TYPE1 *devOutput,
const int32_t ldo,
TYPE2 *devSplit,
const int32_t lds,
short *devSpExp,
TYPE1 *devMax,
const int32_t splitShift
) {
const int32_t iBx = blockIdx.x;
const int32_t iBy = blockIdx.y;
const int32_t nTx = SPLIT_T_NTX;//blockDim.x;
const int32_t nTy = SPLIT_T_NTY;//blockDim.y;
const int32_t iTx = threadIdx.x;
const int32_t iTy = threadIdx.y;
const int32_t addrx = iBx * nTx + iTx;
const int32_t addry = iBy * nTy + iTy;
__shared__ double shm[nTx];
if (addry < n){
const short tau = devSpExp[addry] = ceil(log2(fabs(devMax[addry])));
const TYPE1 sigma = MUL (MUL (CONST, scalbn (1., rho + tau)), splitShift);
TYPE1 max = 0.;
for (int32_t i = addrx; i < m; i += nTx) {
TYPE1 input = devInput[addry * ldi + i];
const TYPE1 split = SUB (ADD (input, sigma), sigma);
input = SUB (input, split);
devSplit[addry * lds + i] = scalbn(split, -tau);
devOutput[addry * ldo + i] = input;
max = MAX(max, fabs(input));
}
shm[iTx] = max;
__syncthreads ();
#pragma unroll
for (int32_t i = nTx/2; i > 0; i>>=1) {
if (iTx < i && shm[iTx] < shm[iTx+i]) shm[iTx] = shm[iTx+i];
__syncthreads ();
}
if (iTx == 0) devMax[addry] = shm[0];
}
}
template <typename TYPE1, typename TYPE2>
__host__
void cuozblasSplitDevice (
cuozblasHandle_t *oh,
const char major,
const int32_t m,
const int32_t n,
const TYPE1 *devInput, // input matrix (devAwrk)
const int32_t ldi, // leading dimension of input matrix
TYPE1 *devOutput, // output matrix (devAwrk)
const int32_t ldo,
TYPE2 *devSplit, // split matrices (output): this includes NumSplitMax matrices
const int32_t lds, // leading dimension of split matrix (# of cols)
short *devSpExp, // exponent of split matrix
TYPE1 *devMax,
int32_t overflowModeFlag,
const int32_t splitShift
) {
int32_t ntx, nty, nbx, nby;
dim3 threads, grid;
const int32_t dim = (major == 'r') ? n : m;
const int32_t rho = getRho <TYPE1, TYPE2> (dim, overflowModeFlag);
if (major == 'r') {
ntx = SPLIT_N_NTX;
nty = SPLIT_N_NTY;
nbx = ceil (float(m) / ntx);
nby = 1;
threads = dim3 (ntx, nty);
grid = dim3 (nbx, nby);
cuozblasSplitRKernel <<< grid, threads >>> (m, n, rho, devInput, ldi, devOutput, ldo, devSplit, lds, devSpExp, devMax, splitShift);
} else {
ntx = SPLIT_T_NTX;
nty = SPLIT_T_NTY;
nbx = 1;
nby = ceil (float(n) / nty);
threads = dim3 (ntx, nty);
grid = dim3 (nbx, nby);
cuozblasSplitCKernel <<< grid, threads >>> (m, n, rho, devInput, ldi, devOutput, ldo, devSplit, lds, devSpExp, devMax, splitShift);
}
}
template <typename TYPE1, typename TYPE2>
__host__
int32_t cuozblasSplit (
cuozblasHandle_t *oh,
const char major,
const int32_t m,
const int32_t n,
const TYPE1 *devInput,
const int32_t ldi,
TYPE1 *devOutput,
const int32_t ldo,
TYPE2 *devSplit,
const int32_t lds,
short *devSpExp,
const int32_t ldse,
TYPE1 *devMax
) {
// FindMax^(0)
if ((major == 'r' && m == 1) || (major == 'c' && n == 1)) {
int32_t ptrMax = 0;
blasRiamax (oh->ch, ((major == 'r') ? n : m), devInput, 1, &ptrMax);
cudaMemcpy (devMax, &devInput[ptrMax-1], sizeof(TYPE1), cudaMemcpyDeviceToDevice);
} else {
cuozblasFindMaxDevice (major, m, n, devInput, ldi, devMax);
}
// Split^(0) & FindMax^(1)
cuozblasSplitDevice (oh, major, m, n, devInput, ldi, devOutput, ldo, devSplit, lds, devSpExp, devMax, oh->overflowModeFlag, oh->splitShift);
const int32_t maxS = (oh->nSplitMax > 0) ? oh->nSplitMax : NumSplitDefaultMax;
int32_t s;
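// Keep splitting the residual until every per-row/column maximum is zero
// (the copy/asum check below) or the maximum number of split matrices is reached.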
for (s = 1; s < maxS; s++) {
TYPE1 check = 0.;
if ((major == 'r' && m == 1) || (major == 'c' && n == 1))
cudaMemcpy (&check, devMax, sizeof(TYPE1), cudaMemcpyDeviceToHost);
else
blasRasum (oh->ch, ((major == 'r') ? m : n), devMax, 1, &check);
if (check == 0.) return s;
// Split^(i) & FindMax^(i+1)
cuozblasSplitDevice (oh, major, m, n, devOutput, ldo, devOutput, ldo, &devSplit[lds*n*s], lds, &devSpExp[ldse*s], devMax, oh->overflowModeFlag, oh->splitShift);
}
if (oh->splitModeFlag > 0)
fprintf (OUTPUT, "OzBLAS error: infSplit failed.\n");
return s;
}
template int32_t cuozblasSplit <double, double> (cuozblasHandle_t *oh, const char major, const int32_t m, const int32_t n, const double *devInput, const int32_t ldi, double *devOutput, const int32_t ldo, double *devSplit, const int32_t lds, short *devSpExp, const int32_t ldse, double *devMax);
template int32_t cuozblasSplit <double, float> (cuozblasHandle_t *oh, const char major, const int32_t m, const int32_t n, const double *devInput, const int32_t ldi, double *devOutput, const int32_t ldo, float *devSplit, const int32_t lds, short *devSpExp, const int32_t ldse, double *devMax);
template int32_t cuozblasSplit <float, float> (cuozblasHandle_t *oh, const char major, const int32_t m, const int32_t n, const float *devInput, const int32_t ldi, float *devOutput, const int32_t ldo, float *devSplit, const int32_t lds, short *devSpExp, const int32_t ldse, float *devMax);
template int32_t cuozblasSplit <float, double> (cuozblasHandle_t *oh, const char major, const int32_t m, const int32_t n, const float *devInput, const int32_t ldi, float *devOutput, const int32_t ldo, double *devSplit, const int32_t lds, short *devSpExp, const int32_t ldse, float *devMax);
|
b19e78add1a3269ed27994854d2e42a7469f6150.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <cuml/explainer/kernel_shap.hpp>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
namespace ML {
namespace Explainer {
/*
* Kernel distributes the exact part of the kernel shap dataset
* Each block scatters the data of one row of `observation` across the corresponding
* nrows_background rows of `dataset`, based on the matching row of `X`.
* So, given:
* background = [[0, 1, 2],
[3, 4, 5]]
* observation = [100, 101, 102]
* X = [[1, 0, 1],
* [0, 1, 1]]
*
* dataset (output):
* [[100, 1, 102],
* [100, 4, 102]
* [0, 101, 102],
* [3, 101, 102]]
*
*
*/
template <typename DataT, typename IdxT>
__global__ void exact_rows_kernel(float* X,
IdxT nrows_X,
IdxT ncols,
DataT* background,
IdxT nrows_background,
DataT* dataset,
DataT* observation)
{
// Each block processes one row of X. Columns are iterated over by blockDim.x at a time to ensure
// data coalescing
int col = threadIdx.x;
int row = blockIdx.x * ncols;
while (col < ncols) {
// Load the X idx for the current column
int curr_X = (int)X[row + col];
// Iterate over nrows_background
for (int row_idx = blockIdx.x * nrows_background;
row_idx < blockIdx.x * nrows_background + nrows_background;
row_idx += 1) {
if (curr_X == 0) {
dataset[row_idx * ncols + col] = background[(row_idx % nrows_background) * ncols + col];
} else {
dataset[row_idx * ncols + col] = observation[col];
}
}
// Increment the column
col += blockDim.x;
}
}
/*
* Kernel distributes the sampled part of the kernel shap dataset
* The first k threads of each block sample `k` column indices of `observation`
* to scatter into `dataset`. Afterwards each block scatters the data of a row of `X`
* across the corresponding nrows_background rows of `dataset`.
* So, given:
* background = [[0, 1, 2, 3],
[5, 6, 7, 8]]
* observation = [100, 101, 102, 103]
* nsamples = [3, 2]
*
* X (output)
* [[1, 0, 1, 1],
* [0, 1, 1, 0]]
*
* dataset (output):
* [[100, 1, 102, 103],
* [100, 6, 102, 103]
* [0, 101, 102, 3],
* [5, 101, 102, 8]]
*
*
*/
template <typename DataT, typename IdxT>
__global__ void sampled_rows_kernel(IdxT* nsamples,
float* X,
IdxT nrows_X,
IdxT ncols,
DataT* background,
IdxT nrows_background,
DataT* dataset,
DataT* observation,
uint64_t seed)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// see what k this block will generate
int k_blk = nsamples[blockIdx.x];
// First k threads of block generate samples
if (threadIdx.x < k_blk) {
hiprandStatePhilox4_32_10_t state;
hiprand_init((unsigned long long)seed, (unsigned long long)tid, 0, &state);
int rand_idx = (int)(hiprand_uniform(&state) * ncols);
// Since X is initialized to 0, we quickly check for collisions (if k_blk << ncols the
// likelihood of collisions is low)
while (atomicExch(&(X[2 * blockIdx.x * ncols + rand_idx]), 1) == 1) {
rand_idx = (int)(hiprand_uniform(&state) * ncols);
}
}
__syncthreads();
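// All k sampled column indices for this block are now recorded in X; below,
// every thread re-reads X column by column to scatter background/observation
// values into the paired sample and complement rows of dataset.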
// Each block processes one row of X. Columns are iterated over by blockDim.x at a time to ensure
// data coalescing
int col_idx = threadIdx.x;
while (col_idx < ncols) {
// Load the X idx for the current column
int curr_X = (int)X[2 * blockIdx.x * ncols + col_idx];
X[(2 * blockIdx.x + 1) * ncols + col_idx] = 1 - curr_X;
for (int bg_row_idx = 2 * blockIdx.x * nrows_background;
bg_row_idx < 2 * blockIdx.x * nrows_background + nrows_background;
bg_row_idx += 1) {
if (curr_X == 0) {
dataset[bg_row_idx * ncols + col_idx] =
background[(bg_row_idx % nrows_background) * ncols + col_idx];
} else {
dataset[bg_row_idx * ncols + col_idx] = observation[col_idx];
}
}
for (int bg_row_idx = (2 * blockIdx.x + 1) * nrows_background;
bg_row_idx < (2 * blockIdx.x + 1) * nrows_background + nrows_background;
bg_row_idx += 1) {
if (curr_X == 0) {
dataset[bg_row_idx * ncols + col_idx] = observation[col_idx];
} else {
// if(threadIdx.x == 0) printf("tid bg_row_idx: %d %d\n", tid, bg_row_idx);
dataset[bg_row_idx * ncols + col_idx] =
background[(bg_row_idx) % nrows_background * ncols + col_idx];
}
}
col_idx += blockDim.x;
}
}
template <typename DataT, typename IdxT>
void kernel_dataset_impl(const raft::handle_t& handle,
float* X,
IdxT nrows_X,
IdxT ncols,
DataT* background,
IdxT nrows_background,
DataT* dataset,
DataT* observation,
int* nsamples,
int len_samples,
int maxsample,
uint64_t seed)
{
const auto& handle_impl = handle;
hipStream_t stream = handle_impl.get_stream();
IdxT nblks;
IdxT nthreads;
nthreads = min(512, ncols);
nblks = nrows_X - len_samples;
if (nblks > 0) {
hipLaunchKernelGGL(( exact_rows_kernel), dim3(nblks), dim3(nthreads), 0, stream,
X, nrows_X, ncols, background, nrows_background, dataset, observation);
}
CUDA_CHECK(hipPeekAtLastError());
// check if random part of the dataset is needed
if (len_samples > 0) {
nblks = len_samples / 2;
// each block does a sample and its complement
hipLaunchKernelGGL(( sampled_rows_kernel), dim3(nblks), dim3(nthreads), 0, stream,
nsamples,
&X[(nrows_X - len_samples) * ncols],
len_samples,
ncols,
background,
nrows_background,
&dataset[(nrows_X - len_samples) * nrows_background * ncols],
observation,
seed);
}
CUDA_CHECK(hipPeekAtLastError());
}
void kernel_dataset(const raft::handle_t& handle,
float* X,
int nrows_X,
int ncols,
float* background,
int nrows_background,
float* dataset,
float* observation,
int* nsamples,
int len_nsamples,
int maxsample,
uint64_t seed)
{
kernel_dataset_impl(handle,
X,
nrows_X,
ncols,
background,
nrows_background,
dataset,
observation,
nsamples,
len_nsamples,
maxsample,
seed);
}
void kernel_dataset(const raft::handle_t& handle,
float* X,
int nrows_X,
int ncols,
double* background,
int nrows_background,
double* dataset,
double* observation,
int* nsamples,
int len_nsamples,
int maxsample,
uint64_t seed)
{
kernel_dataset_impl(handle,
X,
nrows_X,
ncols,
background,
nrows_background,
dataset,
observation,
nsamples,
len_nsamples,
maxsample,
seed);
}
} // namespace Explainer
} // namespace ML
|
b19e78add1a3269ed27994854d2e42a7469f6150.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <cuml/explainer/kernel_shap.hpp>
#include <curand.h>
#include <curand_kernel.h>
namespace ML {
namespace Explainer {
/*
* Kernel distributes the exact part of the kernel shap dataset
* Each block scatters the data of one row of `observation` across the corresponding
* nrows_background rows of `dataset`, based on the matching row of `X`.
* So, given:
* background = [[0, 1, 2],
[3, 4, 5]]
* observation = [100, 101, 102]
* X = [[1, 0, 1],
* [0, 1, 1]]
*
* dataset (output):
* [[100, 1, 102],
* [100, 4, 102]
* [0, 101, 102],
* [3, 101, 102]]
*
*
*/
template <typename DataT, typename IdxT>
__global__ void exact_rows_kernel(float* X,
IdxT nrows_X,
IdxT ncols,
DataT* background,
IdxT nrows_background,
DataT* dataset,
DataT* observation)
{
// Each block processes one row of X. Columns are iterated over by blockDim.x at a time to ensure
// data coalescing
int col = threadIdx.x;
int row = blockIdx.x * ncols;
while (col < ncols) {
// Load the X idx for the current column
int curr_X = (int)X[row + col];
// Iterate over nrows_background
for (int row_idx = blockIdx.x * nrows_background;
row_idx < blockIdx.x * nrows_background + nrows_background;
row_idx += 1) {
if (curr_X == 0) {
dataset[row_idx * ncols + col] = background[(row_idx % nrows_background) * ncols + col];
} else {
dataset[row_idx * ncols + col] = observation[col];
}
}
// Increment the column
col += blockDim.x;
}
}
/*
* Kernel distributes the sampled part of the kernel shap dataset
* The first k threads of each block sample `k` column indices of `observation`
* to scatter into `dataset`. Afterwards each block scatters the data of a row of `X`
* across the corresponding nrows_background rows of `dataset`.
* So, given:
* background = [[0, 1, 2, 3],
[5, 6, 7, 8]]
* observation = [100, 101, 102, 103]
* nsamples = [3, 2]
*
* X (output)
* [[1, 0, 1, 1],
* [0, 1, 1, 0]]
*
* dataset (output):
* [[100, 1, 102, 103],
* [100, 6, 102, 103]
* [0, 101, 102, 3],
* [5, 101, 102, 8]]
*
*
*/
template <typename DataT, typename IdxT>
__global__ void sampled_rows_kernel(IdxT* nsamples,
float* X,
IdxT nrows_X,
IdxT ncols,
DataT* background,
IdxT nrows_background,
DataT* dataset,
DataT* observation,
uint64_t seed)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// see what k this block will generate
int k_blk = nsamples[blockIdx.x];
// First k threads of block generate samples
if (threadIdx.x < k_blk) {
curandStatePhilox4_32_10_t state;
curand_init((unsigned long long)seed, (unsigned long long)tid, 0, &state);
int rand_idx = (int)(curand_uniform(&state) * ncols);
// Since X is initialized to 0, we quickly check for collisions (if k_blk << ncols the
// likelihood of collisions is low)
while (atomicExch(&(X[2 * blockIdx.x * ncols + rand_idx]), 1) == 1) {
rand_idx = (int)(curand_uniform(&state) * ncols);
}
}
__syncthreads();
// Each block processes one row of X. Columns are iterated over by blockDim.x at a time to ensure
// data coalescing
int col_idx = threadIdx.x;
while (col_idx < ncols) {
// Load the X idx for the current column
int curr_X = (int)X[2 * blockIdx.x * ncols + col_idx];
X[(2 * blockIdx.x + 1) * ncols + col_idx] = 1 - curr_X;
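// The complement row (2*blockIdx.x + 1 of X) flips every sampled entry, so the
// two scatter loops below fill a sample row and its complement row of dataset.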
for (int bg_row_idx = 2 * blockIdx.x * nrows_background;
bg_row_idx < 2 * blockIdx.x * nrows_background + nrows_background;
bg_row_idx += 1) {
if (curr_X == 0) {
dataset[bg_row_idx * ncols + col_idx] =
background[(bg_row_idx % nrows_background) * ncols + col_idx];
} else {
dataset[bg_row_idx * ncols + col_idx] = observation[col_idx];
}
}
for (int bg_row_idx = (2 * blockIdx.x + 1) * nrows_background;
bg_row_idx < (2 * blockIdx.x + 1) * nrows_background + nrows_background;
bg_row_idx += 1) {
if (curr_X == 0) {
dataset[bg_row_idx * ncols + col_idx] = observation[col_idx];
} else {
// if(threadIdx.x == 0) printf("tid bg_row_idx: %d %d\n", tid, bg_row_idx);
dataset[bg_row_idx * ncols + col_idx] =
background[(bg_row_idx) % nrows_background * ncols + col_idx];
}
}
col_idx += blockDim.x;
}
}
template <typename DataT, typename IdxT>
void kernel_dataset_impl(const raft::handle_t& handle,
float* X,
IdxT nrows_X,
IdxT ncols,
DataT* background,
IdxT nrows_background,
DataT* dataset,
DataT* observation,
int* nsamples,
int len_samples,
int maxsample,
uint64_t seed)
{
const auto& handle_impl = handle;
cudaStream_t stream = handle_impl.get_stream();
IdxT nblks;
IdxT nthreads;
nthreads = min(512, ncols);
nblks = nrows_X - len_samples;
if (nblks > 0) {
exact_rows_kernel<<<nblks, nthreads, 0, stream>>>(
X, nrows_X, ncols, background, nrows_background, dataset, observation);
}
CUDA_CHECK(cudaPeekAtLastError());
// check if random part of the dataset is needed
if (len_samples > 0) {
nblks = len_samples / 2;
// each block does a sample and its complement
sampled_rows_kernel<<<nblks, nthreads, 0, stream>>>(
nsamples,
&X[(nrows_X - len_samples) * ncols],
len_samples,
ncols,
background,
nrows_background,
&dataset[(nrows_X - len_samples) * nrows_background * ncols],
observation,
seed);
}
CUDA_CHECK(cudaPeekAtLastError());
}
void kernel_dataset(const raft::handle_t& handle,
float* X,
int nrows_X,
int ncols,
float* background,
int nrows_background,
float* dataset,
float* observation,
int* nsamples,
int len_nsamples,
int maxsample,
uint64_t seed)
{
kernel_dataset_impl(handle,
X,
nrows_X,
ncols,
background,
nrows_background,
dataset,
observation,
nsamples,
len_nsamples,
maxsample,
seed);
}
void kernel_dataset(const raft::handle_t& handle,
float* X,
int nrows_X,
int ncols,
double* background,
int nrows_background,
double* dataset,
double* observation,
int* nsamples,
int len_nsamples,
int maxsample,
uint64_t seed)
{
kernel_dataset_impl(handle,
X,
nrows_X,
ncols,
background,
nrows_background,
dataset,
observation,
nsamples,
len_nsamples,
maxsample,
seed);
}
} // namespace Explainer
} // namespace ML
|
435781b9da3db13a9db0bd3ac98bb15fc1cdae44.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
- -* (C) Copyright 2013 King Abdullah University of Science and Technology
Authors:
Ali Charara ([email protected])
David Keyes ([email protected])
Hatem Ltaief ([email protected])
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* Neither the name of the King Abdullah University of Science and
* Technology nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "rocblas.h"
#include "kblas.h"
#include "Xtr_common.ch"
#include "operators.h"
//==============================================================================================
hipblasStatus_t cublasXtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return hipblasStrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb );
}
hipblasStatus_t cublasXtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return hipblasDtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb );
}
hipblasStatus_t cublasXtrsm (hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipComplex *alpha,
const hipComplex *A, int lda,
hipComplex *B, int ldb){
return hipblasCtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb );
}
hipblasStatus_t cublasXtrsm (hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipDoubleComplex *alpha,
const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb){
return hipblasZtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb );
}
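// These cublasXtrsm overloads simply forward to the precision-specific hipBLAS
// trsm routine; Xtrsm() further below falls back to them whenever the custom
// kernel does not handle the requested problem shape.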
//==============================================================================================
#define WARP 32
#define WARP1 33
#define WARP2 34
#define tx threadIdx.x
#define ty threadIdx.y
//==============================================================================================
int kblas_trsm_ib_cublas = 128;
bool kblas_trsm_use_custom = 0;
int kblas_trsm_ib_data = 512;
#define SIMPLE_SIZE(n) ( ((n) < WARP) || ( ((n) % WARP == 0) && ( (n) <= kblas_trsm_ib_cublas ) ) )
#define SIMPLE_SIZE_DATA(n) ( (n) <= kblas_trsm_ib_data )
//==============================================================================================
//shuffle intrinsic is not supported before KEPLER
#if (SM >= 30)
template<typename T, int WARPS_PER_BLOCK, bool LOWER, bool TRANS, bool CONJG, bool UNIT>
__global__ void //__launch_bounds__(WARP * WARPS_PER_BLOCK)
trsm_mul32_L(int M, int N, T alpha, const T* __restrict__ A, int incA, T* B, int incB, int mb)
{
const int A_COLS_PER_WARP = WARP / WARPS_PER_BLOCK;
const bool forward = (LOWER != TRANS);
//setup shared memory
__shared__ T sA[WARP * WARP1];//strided to avoid bank conflicts
int txyw = tx + ty * WARP1, txyiA = tx + ty * incA, txyiB = tx + ty * incB, jtxw;
int l, c, r, startB = 0, i;
T rB, s, rBj, a[4], b[4], *sAA, *BB;
for(startB = 0; startB < N; startB += gridDim.x * WARPS_PER_BLOCK)
{
if( (blockIdx.x * WARPS_PER_BLOCK + startB) >= N)
return;
BB = B + (blockIdx.x * WARPS_PER_BLOCK + startB) * incB;
//checking boundary case, the column indices of B this warp is computing
//if not active, this warp will only participate in fetching A sub-matrices, will not compute
bool active = ( (blockIdx.x * WARPS_PER_BLOCK + startB + ty) < N );
for(c = (forward ? 0 : mb-1); (forward && c < mb) || (!forward && c >= 0); c += (forward ? 1 : -1))
{
s = make_zero<T>();
for(r = (forward ? 0 : mb-1); (forward && r < c) || (!forward && r > c); r += (forward ? 1 : -1))
{
#pragma unroll
for(l = 0; l < A_COLS_PER_WARP; l++){
if(TRANS)
//load A(r,c)
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = A[txyiA + WARP * (r + c * incA) + l * WARPS_PER_BLOCK * incA];
else
//load A(c,r)
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = A[txyiA + WARP * (c + r * incA) + l * WARPS_PER_BLOCK * incA];
}
//load B(r)
if(active)
rB = BB[txyiB + WARP * r];
__syncthreads();
if(active){
//gemm A(r,c)/A(c,r) & B(r) onto B(c) held at s
if(TRANS)
sAA = sA + tx*WARP1;
else
sAA = sA + tx;
#pragma unroll
for(int j = 0; j < WARP; j+=4){
if(TRANS){
#pragma unroll
for(i = 0; i < 4; i++)
a[i] = CONJG ? conjugate(sAA[j + i]) : sAA[j + i];
}else{
#pragma unroll
for(i = 0; i < 4; i++)
a[i] = sAA[(j + i)*WARP1];
}
#pragma unroll
for(i = 0; i < 4; i++)
b[i] = shfl(rB, j + i);
#pragma unroll
for(i = 0; i < 4; i++)
s = FMA( a[i], b[i], s );
}
}
__syncthreads();
}
//load A(c,c) from global to shared mem
#pragma unroll
for(l = 0; l < A_COLS_PER_WARP; l++){
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = A[txyiA + WARP * c * (incA + 1) + l * WARPS_PER_BLOCK * incA];
}
//load B(c) into registers
if(active){
rB = BB[txyiB + WARP * c];
}
__syncthreads();
if(active)
{
//perform trsm on shared mem
if(!LOWER && TRANS)
jtxw = tx * WARP1;
else
if(!LOWER && !TRANS)
jtxw = tx + (WARP - 1) * WARP1;
else
if(LOWER && TRANS)
jtxw = tx * WARP1 + (WARP - 1);
else
if(LOWER && !TRANS)
jtxw = tx;
#pragma unroll
for(int j = (forward ? 0 : WARP-1); (forward && (j < WARP)) || (!forward && (j >= 0)); j += (forward ? 1 : -1)){
if(j == tx){
rB = FMA(alpha, rB, -s);//TODO
if(!UNIT){
a[0] = (TRANS && CONJG) ? conjugate(sA[tx * WARP2]) : sA[tx * WARP2];//diagonal element
rB = rB / a[0];//TODO
}
}
rBj = shfl(rB, j);
if( (forward && (j < tx)) || (!forward && (j > tx)) ){
a[0] = (TRANS && CONJG) ? conjugate(sA[jtxw]) : sA[jtxw];
s = FMA(a[0], rBj, s);
}
jtxw += (TRANS ? 1 : WARP1) * (forward ? 1 : -1);
}
//store back B(c) to global mem
BB[txyiB + WARP * c] = rB;
}
__syncthreads();
}
}
}
//==============================================================================================
#define TRSM_NUM_VARIANTS 4
#define TRSM_kernel_variants(__WPB) \
trsm_mul32_L<T, __WPB, true, false, false, false>, \
trsm_mul32_L<T, __WPB, true, true, false, false>, \
trsm_mul32_L<T, __WPB, false, false, false, false>, \
trsm_mul32_L<T, __WPB, false, true, false, false>
/*,TODO
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, true, false, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, true, true, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, false, false, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, false, true, false>*/
template<class T>
hipblasStatus_t Xtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB){
//handle odd cases with cublas
if( (*alpha == make_zero<T>())
|| (!kblas_trsm_use_custom)
|| (side == HIPBLAS_SIDE_LEFT && m < WARP)
|| (side == HIPBLAS_SIDE_RIGHT/* && n < WARP*/))//TODO
{
return cublasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
typedef void (*trsm_kernels_type)(int M, int N, T alpha, const T* A, int incA, T* B, int incB, int mb);
#define WARPS_PER_BLOCK 8
#define B_COLS_PER_WARP 1
trsm_kernels_type trsm_kernels[TRSM_NUM_VARIANTS] = {// T, WARPS_PER_BLOCK, LOWER, TRANS, CONJG, UNIT
TRSM_kernel_variants(WARPS_PER_BLOCK)
};
hipStream_t curStream;
hipblasStatus_t status;
if((status = hipblasGetStream( handle, &curStream )) != HIPBLAS_STATUS_SUCCESS ) return status;
if( ((side == HIPBLAS_SIDE_LEFT) && (m % WARP == 0)) /*|| ((side == HIPBLAS_SIDE_RIGHT) && (n % WARP == 0))*/ )//TODO
{
int func_idx = /*4*(side == HIPBLAS_SIDE_RIGHT) + */2*(uplo == HIPBLAS_FILL_MODE_UPPER) + (trans != HIPBLAS_OP_N);// + (diag == HIPBLAS_DIAG_UNIT);TODO
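// func_idx packs (uplo, trans) to match the TRSM_kernel_variants ordering above:
// 0 = Lower/NoTrans, 1 = Lower/Trans, 2 = Upper/NoTrans, 3 = Upper/Trans.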
dim3 blockDim( WARP, WARPS_PER_BLOCK );
dim3 gridDim(
(side == HIPBLAS_SIDE_LEFT) * (n / (WARPS_PER_BLOCK * B_COLS_PER_WARP) + (n % (WARPS_PER_BLOCK * B_COLS_PER_WARP) > 0))
/*+TODO
(side == HIPBLAS_SIDE_RIGHT) * (m / (WARPS_PER_BLOCK * B_COLS_PER_WARP) + (m % (WARPS_PER_BLOCK * B_COLS_PER_WARP) > 0))*/
, 1);
int mb = (side == HIPBLAS_SIDE_LEFT) * m / WARP /*+ (side == HIPBLAS_SIDE_RIGHT) * n / WARP*/;//TODO
hipLaunchKernelGGL(trsm_kernels[func_idx], dim3(gridDim), dim3(blockDim), 0, curStream, m, n, *alpha, A, incA, B, incB, mb);
if(!_kblas_error( (hipGetLastError()), __func__, __FILE__, __LINE__ ))
return HIPBLAS_STATUS_EXECUTION_FAILED;
}else{
//error: we should not reach this case
return HIPBLAS_STATUS_INTERNAL_ERROR;
}
return HIPBLAS_STATUS_SUCCESS;
}
#else
template<class T>
hipblasStatus_t Xtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB){
//handle with cublas
return cublasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
#endif
//==============================================================================================
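// The recursive kblasXtrsm below splits the triangular matrix and solves trsm-gemm-trsm on the
// halves. Written out for the Left/Upper/NoTrans case with A split at m1 (A11 is m1 x m1, A22 is
// m2 x m2, B stacked as [B1; B2]):
//   A22 * X2 = alpha * B2          -> first kblasXtrsm call (B2 is overwritten by X2)
//   B1 <- alpha * B1 - A12 * X2    -> cublasXgemm update
//   A11 * X1 = B1                  -> second kblasXtrsm call with alpha = 1
// The other seven side/uplo/trans combinations permute the same three steps; the Right-side
// [Conj]Trans cases fold 1/alpha into the GEMM via mInvAlpha instead.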
template<typename T>
hipblasStatus_t kblasXtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB)
{
T one = make_one<T>();
T mone = make_zero<T>() - one;
T mInvAlpha = mone / *alpha;
hipblasStatus_t status;
hipblasOperation_t noTrans = HIPBLAS_OP_N;//Trans = HIPBLAS_OP_T,
if( (*alpha == make_zero<T>())//TODO
|| ( (side == HIPBLAS_SIDE_LEFT) && (SIMPLE_SIZE(m)) )
|| ( (side == HIPBLAS_SIDE_RIGHT) && (SIMPLE_SIZE(n)) ) ){
return Xtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
else
if(side == HIPBLAS_SIDE_LEFT){
int m1, m2;
if(REG_SIZE(m))
m1 = m2 = m/2;
else{
m1 = CLOSEST_REG_SIZE(m);
m2 = m-m1;
}
if(uplo == HIPBLAS_FILL_MODE_UPPER){
//Left / Upper / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
alpha, A+m1+m1*incA, incA,
B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, A+m1*incA, incA,
B+m1, incB,
alpha, B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
&one, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Left / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, A+m1*incA, incA,
B, incB,
alpha, B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
&one, A+m1+m1*incA, incA,
B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}else{//uplo == KBLAS_Lower
//Left / Lower / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, A+m1, incA,
B, incB,
alpha, B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
&one, A+m1+m1*incA, incA,
B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Left / Lower / [Conj]Trans
else{//transa == KBLAS_Trans
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
alpha, A+m1+m1*incA, incA,
B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, A+m1, incA,
B+m1, incB,
alpha, B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
&one, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}//transa == KBLAS_Trans
}
}
else{//side == KBLAS_Right
int n1, n2;
if(REG_SIZE(n))
n1 = n2 = n/2;
else{
n1 = CLOSEST_REG_SIZE(n);
n2 = n-n1;
}
if(uplo == HIPBLAS_FILL_MODE_UPPER){
//Right / Upper / NoTrans
if(trans == noTrans){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mone, B, incB,
A+n1*incA, incA,
alpha, B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
&one, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Right / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mInvAlpha, B+n1*incB, incB,
A+n1*incA, incA,
&one, B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}
else{
//Right / Lower / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mone, B+n1*incB, incB,
A+n1, incA,
alpha, B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
&one, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Right / Lower / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mInvAlpha, B, incB,
A+n1, incA,
&one, B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}
}//side == Right
return HIPBLAS_STATUS_SUCCESS;
}
//==============================================================================================
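// The overload below takes both host (h_A/h_B) and device (d_A/d_B) pointers plus two extra
// streams: strIn stages host-to-device copies, strOut drains results back to the host, and the
// handle's own stream does the math. Events (eDataIn/eComp) order the three streams so copies for
// the next sub-problem overlap with the GEMM/TRSM of the current one; BIsIn/AIsIn/getBOut track
// which blocks are already resident on the device and whether B must be copied back at this level.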
template<typename T>
hipblasStatus_t kblasXtrsm(hipblasHandle_t handle, hipStream_t &strIn, hipStream_t &strOut,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int incA, T* d_A,
T *h_B, int incB, T* d_B,
bool BIsIn, bool getBOut, bool AIsIn)
{
T one = make_one<T>();
T mone = make_zero<T>() - one;
T mInvAlpha = mone / *alpha;
hipblasStatus_t status;
hipblasOperation_t noTrans = HIPBLAS_OP_N;//Trans = HIPBLAS_OP_T,
hipEvent_t eDataIn, eComp;
check_error( hipEventCreateWithFlags(&eDataIn, hipEventDisableTiming), HIPBLAS_STATUS_EXECUTION_FAILED);
check_error( hipEventCreateWithFlags(&eComp, hipEventDisableTiming), HIPBLAS_STATUS_EXECUTION_FAILED);
hipStream_t strComp;
check_error( hipblasGetStream(handle, &strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
if( (*alpha == make_zero<T>())//TODO
|| ( (side == HIPBLAS_SIDE_LEFT) && (SIMPLE_SIZE(m)) )
|| ( (side == HIPBLAS_SIDE_RIGHT) && (SIMPLE_SIZE(n)) ) ){
int Am = (side == HIPBLAS_SIDE_LEFT) ? m : n;
//if B is not already in, copy in B block
if(!BIsIn)
check_error( status = hipblasSetMatrixAsync( m, n, sizeof(T), h_B, incB, d_B, incB, strIn ), status);
//copy in A block
if(!AIsIn)
check_error( status = hipblasSetMatrixAsync( Am, Am, sizeof(T), h_A, incA, d_A, incA, strIn ), status);
//wait for data to arrive
if(!AIsIn || !BIsIn){
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if( (status = Xtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, d_A, incA,
d_B, incB ) ) != HIPBLAS_STATUS_SUCCESS ) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m, n, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
}
else
if(side == HIPBLAS_SIDE_LEFT){
int m1, m2;
if(REG_SIZE(m))
m1 = m2 = m/2;
else{
m1 = CLOSEST_REG_SIZE(m);
m2 = m-m1;
}
if( (!AIsIn && SIMPLE_SIZE_DATA(m)) || (!BIsIn && SIMPLE_SIZE_DATA(m)) ){
if( (!AIsIn && SIMPLE_SIZE_DATA(m)) ){
check_error( status = hipblasSetMatrixAsync( m, m, sizeof(T), h_A, incA, d_A, incA, strIn), status);
AIsIn = true;
}
if( (!BIsIn && SIMPLE_SIZE_DATA(m)) ){
check_error( status = hipblasSetMatrixAsync( m, n, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//wait for data to arrive
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if(uplo == HIPBLAS_FILL_MODE_UPPER){
//Left / Upper / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
alpha, h_A+m1+m1*incA, incA, d_A+m1+m1*incA,
h_B+m1, incB, d_B+m1,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m2, n, sizeof(T), d_B+m1, incB, h_B+m1, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = hipblasSetMatrixAsync( m1, n, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = hipblasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1*incA, incA, d_A+m1*incA, incA, strIn), status);
//wait for data to arrive
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, d_A+m1*incA, incA,
d_B+m1, incB,
alpha, d_B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
&one, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Left / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m1, n, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = hipblasSetMatrixAsync( m2, n, sizeof(T), h_B+m1, incB, d_B+m1, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = hipblasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1*incA, incA, d_A+m1*incA, incA, strIn), status);
//wait for data to arrive
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, d_A+m1*incA, incA,
d_B, incB,
alpha, d_B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
&one, h_A+m1+m1*incA, incA, d_A+m1+m1*incA,
h_B+m1, incB, d_B+m1,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}else{//uplo == KBLAS_Lower
//Left / Lower / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m1, n, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = hipblasSetMatrixAsync( m2, n, sizeof(T), h_B+m1, incB, d_B+m1, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = hipblasSetMatrixAsync( m2, m1, sizeof(T), h_A+m1, incA, d_A+m1, incA, strIn), status);
//wait for data to arrive
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, d_A+m1, incA,
d_B, incB,
alpha, d_B+m1, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
&one, h_A+m1+m1*incA, incA, d_A+m1+m1*incA,
h_B+m1, incB, d_B+m1,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Left / Lower / [Conj]Trans
else{//transa == KBLAS_Trans
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
alpha, h_A+m1+m1*incA, incA, d_A+m1+m1*incA,
h_B+m1, incB, d_B+m1,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m2, n, sizeof(T), d_B+m1, incB, h_B+m1, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = hipblasSetMatrixAsync( m1, n, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = hipblasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1, incA, d_A+m1, incA, strIn), status);
//wait for data to arrive
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, d_A+m1, incA,
d_B+m1, incB,
alpha, d_B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
&one, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}//transa == KBLAS_Trans
}
}
else{//side == KBLAS_Right
int n1, n2;
if(REG_SIZE(n))
n1 = n2 = n/2;
else{
n1 = CLOSEST_REG_SIZE(n);
n2 = n-n1;
}
if(uplo == HIPBLAS_FILL_MODE_UPPER){
//Right / Upper / NoTrans
if(trans == noTrans){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m, n1, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = hipblasSetMatrixAsync( m, n2, sizeof(T), h_B+n1*incB, incB, d_B+n1*incB, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = hipblasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1*incA, incA, d_A+n1*incA, incA, strIn), status);
//wait for data to arrive
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mone, d_B, incB,
d_A+n1*incA, incA,
alpha, d_B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
&one, h_A+n1+n1*incA, incA, d_A+n1+n1*incA,
h_B+n1*incB, incB, d_B+n1*incB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Right / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*incA, incA, d_A+n1+n1*incA,
h_B+n1*incB, incB, d_B+n1*incB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m, n2, sizeof(T), d_B+n1*incB, incB, h_B+n1*incB, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = hipblasSetMatrixAsync( m, n1, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = hipblasSetMatrixAsync( n2, n1, sizeof(T), h_A+n1*incA, incA, d_A+n1*incA, incA, strIn), status);
//wait for data to arrive
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mInvAlpha, d_B+n1*incB, incB,
d_A+n1*incA, incA,
&one, d_B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}
else{
//Right / Lower / NoTrans
if(trans == HIPBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*incA, incA, d_A+n1+n1*incA,
h_B+n1*incB, incB, d_B+n1*incB,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m, n2, sizeof(T), d_B+n1*incB, incB, h_B+n1*incB, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = hipblasSetMatrixAsync( m, n1, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = hipblasSetMatrixAsync( n2, n1, sizeof(T), h_A+n1, incA, d_A+n1, incA, strIn), status);
//wait for data to arrive
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mone, d_B+n1*incB, incB,
d_A+n1, incA,
alpha, d_B, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
&one, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
//Right / Lower / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, false, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( hipEventRecord(eComp, strComp), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strOut, eComp, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m, n1, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = hipblasSetMatrixAsync( m, n2, sizeof(T), h_B+n1*incB, incB, d_B+n1*incB, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = hipblasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1, incA, d_A+n1, incA, strIn), status);
//wait for data to arrive
check_error( hipEventRecord(eDataIn, strIn), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamWaitEvent(strComp, eDataIn, 0), HIPBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mInvAlpha, d_B, incB,
d_A+n1, incA,
&one, d_B+n1*incB, incB
)) != HIPBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*incA, incA, d_A+n1+n1*incA,
h_B+n1*incB, incB, d_B+n1*incB,
BIsIn, getBOut, AIsIn
)) != HIPBLAS_STATUS_SUCCESS) return status;
}
}
}//side == Right
check_error( hipEventDestroy( eDataIn ), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipEventDestroy( eComp ), HIPBLAS_STATUS_INTERNAL_ERROR);
return HIPBLAS_STATUS_SUCCESS;
}
//==============================================================================================
//#define DO_INLINE_BOUT 0
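// kblasXtrsm_cpu is the host-pointer entry point behind kblas_strsm/dtrsm/ctrsm/ztrsm below: it
// allocates device scratch for A and B, creates a non-blocking input stream (plus an output stream
// when hipDeviceAttributeAsyncEngineCount reports more than one copy engine, enabling
// DO_INLINE_BOUT), and runs the overlapped recursion above; without a second copy engine it instead
// synchronizes the compute stream and fetches B with a single hipblasGetMatrixAsync at the end.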
template<class T>
hipblasStatus_t kblasXtrsm_cpu(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int incA,
T *h_B, int incB){
//allocate memory on device
T *d_A, *d_B;
int Am, An, Bm, Bn;
if ( side == HIPBLAS_SIDE_LEFT ) {
Am = An = m;
} else {
Am = An = n;
}
Bm = m;
Bn = n;
/*check_error( hipHostRegister((void*)h_A, Am * An * sizeof(T), hipHostRegisterDefault), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipHostRegister((void*)h_B, Bm * Bn * sizeof(T), hipHostRegisterDefault), HIPBLAS_STATUS_INTERNAL_ERROR);*/
hipblasStatus_t status;
//*
int AsyncEngineCount, devID;
check_error( hipGetDevice(&devID), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipDeviceGetAttribute(&AsyncEngineCount, hipDeviceAttributeAsyncEngineCount, devID), HIPBLAS_STATUS_INTERNAL_ERROR);
bool DO_INLINE_BOUT = AsyncEngineCount > 1;
//*/
check_error( hipMalloc( (void**)&d_A, (Am*An)*sizeof(T) ), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipMalloc( (void**)&d_B, (Bm*Bn)*sizeof(T) ), HIPBLAS_STATUS_INTERNAL_ERROR);
//setup streams
hipStream_t inStream, outStream;
check_error( hipStreamCreateWithFlags( &inStream, hipStreamNonBlocking), HIPBLAS_STATUS_INTERNAL_ERROR );
if(DO_INLINE_BOUT)
check_error( hipStreamCreateWithFlags( &outStream, hipStreamNonBlocking), HIPBLAS_STATUS_INTERNAL_ERROR );
//call cpu API trsm
check_error(
(status = kblasXtrsm(handle, inStream, outStream,
side, uplo, trans,diag,
m, n,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
false, DO_INLINE_BOUT, false)
), status);
//sync streams
if(DO_INLINE_BOUT){
check_error( hipStreamSynchronize( outStream ), HIPBLAS_STATUS_INTERNAL_ERROR);
}else{
hipStream_t compStream;
check_error( hipblasGetStream(handle, &compStream), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( hipStreamSynchronize( compStream ), HIPBLAS_STATUS_INTERNAL_ERROR);
check_error( status = hipblasGetMatrixAsync( m, n, sizeof(T), d_B, incB, h_B, incB, inStream), status);
}
//revoke streams
check_error( hipStreamDestroy( inStream ), HIPBLAS_STATUS_INTERNAL_ERROR);
if(DO_INLINE_BOUT)
check_error( hipStreamDestroy( outStream ), HIPBLAS_STATUS_INTERNAL_ERROR);
/*check_error( hipHostUnregister( (void*)h_A ), HIPBLAS_STATUS_INTERNAL_ERROR );
check_error( hipHostUnregister( (void*)h_B ), HIPBLAS_STATUS_INTERNAL_ERROR );*/
//free device memory
check_error( hipFree( d_A ), HIPBLAS_STATUS_INTERNAL_ERROR );
check_error( hipFree( d_B ), HIPBLAS_STATUS_INTERNAL_ERROR );
return HIPBLAS_STATUS_SUCCESS;
}
//==============================================================================================
/*extern "C" {
int kblas_strsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int incA,
float *B, int incB,
hipStream_t stream){
check_error(hipblasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_dtrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int incA,
double *B, int incB,
hipStream_t stream){
check_error(hipblasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ctrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
hipComplex alpha, const hipComplex *A, int incA,
hipComplex *B, int incB,
hipStream_t stream){
check_error(hipblasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ztrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
hipDoubleComplex alpha, const hipDoubleComplex *A, int incA,
hipDoubleComplex *B, int incB,
hipStream_t stream){
check_error(hipblasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_strsm(
char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int incA,
float *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_dtrsm(
char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int incA,
double *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ctrsm(
char side, char uplo, char trans, char diag,
int m, int n,
hipComplex alpha, const hipComplex *A, int incA,
hipComplex *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ztrsm(
char side, char uplo, char trans, char diag,
int m, int n,
hipDoubleComplex alpha, const hipDoubleComplex *A, int incA,
hipDoubleComplex *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
}*/
//==============================================================================================
#define kblasXtrsm_async_BODY { \
\
hipblasHandle_t cublas_handle; \
if( hipblasCreate(&cublas_handle) != HIPBLAS_STATUS_SUCCESS ) return; \
if( hipblasSetStream(cublas_handle, stream) != HIPBLAS_STATUS_SUCCESS ){ \
hipblasDestroy(cublas_handle); \
return; \
} \
hipblasSideMode_t side_v2 = (side == KBLAS_Left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT); \
hipblasFillMode_t uplo_v2 = (uplo == KBLAS_Lower ? HIPBLAS_FILL_MODE_LOWER : HIPBLAS_FILL_MODE_UPPER); \
hipblasOperation_t trans_v2 = (trans == KBLAS_Trans ? HIPBLAS_OP_T : HIPBLAS_OP_N); \
hipblasDiagType_t diag_v2 = (diag == KBLAS_Unit ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT); \
\
kblasXtrsm(cublas_handle, \
side_v2, uplo_v2, trans_v2, diag_v2, \
m, n, \
&alpha, A, lda, \
B, ldb); \
\
hipblasDestroy(cublas_handle); \
}
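// Each legacy char-option wrapper below expands this macro: it creates a temporary hipBLAS handle,
// binds it to the caller's stream, maps the KBLAS_* char flags to the hipblas enums, runs the
// recursive kblasXtrsm, and destroys the handle. The kblasXtrsm status is discarded because these
// wrappers return void.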
extern "C"{
void kblasStrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int lda,
float *B, int ldb,
hipStream_t stream){
kblasXtrsm_async_BODY
}
void kblasDtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int lda,
double *B, int ldb,
hipStream_t stream){
kblasXtrsm_async_BODY
}
void kblasCtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
hipComplex alpha, const hipComplex *A, int lda,
hipComplex *B, int ldb,
hipStream_t stream){
kblasXtrsm_async_BODY
}
void kblasZtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
hipDoubleComplex alpha, const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb,
hipStream_t stream){
kblasXtrsm_async_BODY
}
}
//==============================================================================================
void kblasStrsm(char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int lda,
float *B, int ldb){
kblasStrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasDtrsm(char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int lda,
double *B, int ldb){
kblasDtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasCtrsm(char side, char uplo, char trans, char diag,
int m, int n,
hipComplex alpha, const hipComplex *A, int lda,
hipComplex *B, int ldb){
kblasCtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasZtrsm(char side, char uplo, char trans, char diag,
int m, int n,
hipDoubleComplex alpha, const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb){
kblasZtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
//==============================================================================================
hipblasStatus_t kblasStrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblasDtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblasCtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipComplex *alpha,
const hipComplex *A, int lda,
hipComplex *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblasZtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipDoubleComplex *alpha,
const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
//==============================================================================================
hipblasStatus_t kblas_strsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblas_dtrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblas_ctrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipComplex *alpha,
const hipComplex *A, int lda,
hipComplex *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
hipblasStatus_t kblas_ztrsm(hipblasHandle_t handle,
hipblasSideMode_t side, hipblasFillMode_t uplo,
hipblasOperation_t trans, hipblasDiagType_t diag,
int m, int n,
const hipDoubleComplex *alpha,
const hipDoubleComplex *A, int lda,
hipDoubleComplex *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
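/* Illustrative usage sketch (not part of the library; d_A, d_B, lda, ldb and the problem sizes are
   placeholders chosen for the example):

     hipblasHandle_t handle;
     hipblasCreate(&handle);
     float alpha = 1.0f;
     // Solve op(A) * X = alpha * B with A (m x m, lower, non-unit) and B (m x n) already on device:
     kblasStrsm(handle,
                HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER,
                HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT,
                m, n,
                &alpha, d_A, lda,
                d_B, ldb);
     // kblas_strsm() has the same signature but takes host pointers and manages the device
     // buffers and copy streams internally (see kblasXtrsm_cpu above).
     hipblasDestroy(handle);
*/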
|
435781b9da3db13a9db0bd3ac98bb15fc1cdae44.cu
|
/**
- -* (C) Copyright 2013 King Abdullah University of Science and Technology
Authors:
Ali Charara ([email protected])
David Keyes ([email protected])
Hatem Ltaief ([email protected])
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* Neither the name of the King Abdullah University of Science and
* Technology nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include "kblas.h"
#include "Xtr_common.ch"
#include "operators.h"
//==============================================================================================
cublasStatus_t cublasXtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return cublasStrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb );
}
cublasStatus_t cublasXtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return cublasDtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb );
}
cublasStatus_t cublasXtrsm (cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuComplex *alpha,
const cuComplex *A, int lda,
cuComplex *B, int ldb){
return cublasCtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb );
}
cublasStatus_t cublasXtrsm (cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuDoubleComplex *alpha,
const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb){
return cublasZtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb );
}
//==============================================================================================
#define WARP 32
#define WARP1 33
#define WARP2 34
#define tx threadIdx.x
#define ty threadIdx.y
//==============================================================================================
int kblas_trsm_ib_cublas = 128;
bool kblas_trsm_use_custom = 0;
int kblas_trsm_ib_data = 512;
#define SIMPLE_SIZE(n) ( ((n) < WARP) || ( ((n) % WARP == 0) && ( (n) <= kblas_trsm_ib_cublas ) ) )
#define SIMPLE_SIZE_DATA(n) ( (n) <= kblas_trsm_ib_data )
//==============================================================================================
//shuffle intrinsic is not supported before KEPLER
#if (SM >= 30)
template<typename T, int WARPS_PER_BLOCK, bool LOWER, bool TRANS, bool CONJG, bool UNIT>
__global__ void //__launch_bounds__(WARP * WARPS_PER_BLOCK)
trsm_mul32_L(int M, int N, T alpha, const T* __restrict__ A, int incA, T* B, int incB, int mb)
{
const int A_COLS_PER_WARP = WARP / WARPS_PER_BLOCK;
const bool forward = (LOWER != TRANS);
//setup shared memory
__shared__ T sA[WARP * WARP1];//strided to avoid bank conflicts
int txyw = tx + ty * WARP1, txyiA = tx + ty * incA, txyiB = tx + ty * incB, jtxw;
int l, c, r, startB = 0, i;
T rB, s, rBj, a[4], b[4], *sAA, *BB;
for(startB = 0; startB < N; startB += gridDim.x * WARPS_PER_BLOCK)
{
if( (blockIdx.x * WARPS_PER_BLOCK + startB) >= N)
return;
BB = B + (blockIdx.x * WARPS_PER_BLOCK + startB) * incB;
//checking boundary case, the column indices of B this warp is computing
//if not active, this warp will only participate in fetching A sub-matrices, will not compute
bool active = ( (blockIdx.x * WARPS_PER_BLOCK + startB + ty) < N );
for(c = (forward ? 0 : mb-1); (forward && c < mb) || (!forward && c >= 0); c += (forward ? 1 : -1))
{
s = make_zero<T>();
for(r = (forward ? 0 : mb-1); (forward && r < c) || (!forward && r > c); r += (forward ? 1 : -1))
{
#pragma unroll
for(l = 0; l < A_COLS_PER_WARP; l++){
if(TRANS)
//load A(r,c)
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = A[txyiA + WARP * (r + c * incA) + l * WARPS_PER_BLOCK * incA];
else
//load A(c,r)
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = A[txyiA + WARP * (c + r * incA) + l * WARPS_PER_BLOCK * incA];
}
//load B(r)
if(active)
rB = BB[txyiB + WARP * r];
__syncthreads();
if(active){
//gemm A(r,c)/A(c,r) & B(r) onto B(c) held at s
if(TRANS)
sAA = sA + tx*WARP1;
else
sAA = sA + tx;
#pragma unroll
for(int j = 0; j < WARP; j+=4){
if(TRANS){
#pragma unroll
for(i = 0; i < 4; i++)
a[i] = CONJG ? conjugate(sAA[j + i]) : sAA[j + i];
}else{
#pragma unroll
for(i = 0; i < 4; i++)
a[i] = sAA[(j + i)*WARP1];
}
#pragma unroll
for(i = 0; i < 4; i++)
b[i] = shfl(rB, j + i);
#pragma unroll
for(i = 0; i < 4; i++)
s = FMA( a[i], b[i], s );
}
}
__syncthreads();
}
//load A(c,c) from global to shared mem
#pragma unroll
for(l = 0; l < A_COLS_PER_WARP; l++){
sA[txyw + l * WARPS_PER_BLOCK * WARP1] = A[txyiA + WARP * c * (incA + 1) + l * WARPS_PER_BLOCK * incA];
}
//load B(c) into registers
if(active){
rB = BB[txyiB + WARP * c];
}
__syncthreads();
if(active)
{
//perform trsm on shared mem
if(!LOWER && TRANS)
jtxw = tx * WARP1;
else
if(!LOWER && !TRANS)
jtxw = tx + (WARP - 1) * WARP1;
else
if(LOWER && TRANS)
jtxw = tx * WARP1 + (WARP - 1);
else
if(LOWER && !TRANS)
jtxw = tx;
#pragma unroll
for(int j = (forward ? 0 : WARP-1); (forward && (j < WARP)) || (!forward && (j >= 0)); j += (forward ? 1 : -1)){
if(j == tx){
rB = FMA(alpha, rB, -s);//TODO
if(!UNIT){
a[0] = (TRANS && CONJG) ? conjugate(sA[tx * WARP2]) : sA[tx * WARP2];//diagonal element
rB = rB / a[0];//TODO
}
}
rBj = shfl(rB, j);
if( (forward && (j < tx)) || (!forward && (j > tx)) ){
a[0] = (TRANS && CONJG) ? conjugate(sA[jtxw]) : sA[jtxw];
s = FMA(a[0], rBj, s);
}
jtxw += (TRANS ? 1 : WARP1) * (forward ? 1 : -1);
}
//store back B(c) to global mem
BB[txyiB + WARP * c] = rB;
}
__syncthreads();
}
}
}
//==============================================================================================
#define TRSM_NUM_VARIANTS 4
#define TRSM_kernel_variants(__WPB) \
trsm_mul32_L<T, __WPB, true, false, false, false>, \
trsm_mul32_L<T, __WPB, true, true, false, false>, \
trsm_mul32_L<T, __WPB, false, false, false, false>, \
trsm_mul32_L<T, __WPB, false, true, false, false>
/*,TODO
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, true, false, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, true, true, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, false, false, false>,
trsm_mul32_R<T, WARPS_PER_BLOCK, B_COLS_PER_WARP, false, true, false>*/
template<class T>
cublasStatus_t Xtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB){
//handle odd cases with cublas
if( (*alpha == make_zero<T>())
|| (!kblas_trsm_use_custom)
|| (side == CUBLAS_SIDE_LEFT && m < WARP)
|| (side == CUBLAS_SIDE_RIGHT/* && n < WARP*/))//TODO
{
return cublasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
typedef void (*trsm_kernels_type)(int M, int N, T alpha, const T* A, int incA, T* B, int incB, int mb);
#define WARPS_PER_BLOCK 8
#define B_COLS_PER_WARP 1
trsm_kernels_type trsm_kernels[TRSM_NUM_VARIANTS] = {// T, WARPS_PER_BLOCK, LOWER, TRANS, CONJG, UNIT
TRSM_kernel_variants(WARPS_PER_BLOCK)
};
cudaStream_t curStream;
cublasStatus_t status;
if((status = cublasGetStream( handle, &curStream )) != CUBLAS_STATUS_SUCCESS ) return status;
if( ((side == CUBLAS_SIDE_LEFT) && (m % WARP == 0)) /*|| ((side == CUBLAS_SIDE_RIGHT) && (n % WARP == 0))*/ )//TODO
{
int func_idx = /*4*(side == CUBLAS_SIDE_RIGHT) + */2*(uplo == CUBLAS_FILL_MODE_UPPER) + (trans != CUBLAS_OP_N);// + (diag == CUBLAS_DIAG_UNIT);TODO
dim3 blockDim( WARP, WARPS_PER_BLOCK );
dim3 gridDim(
(side == CUBLAS_SIDE_LEFT) * (n / (WARPS_PER_BLOCK * B_COLS_PER_WARP) + (n % (WARPS_PER_BLOCK * B_COLS_PER_WARP) > 0))
/*+TODO
(side == CUBLAS_SIDE_RIGHT) * (m / (WARPS_PER_BLOCK * B_COLS_PER_WARP) + (m % (WARPS_PER_BLOCK * B_COLS_PER_WARP) > 0))*/
, 1);
int mb = (side == CUBLAS_SIDE_LEFT) * m / WARP /*+ (side == CUBLAS_SIDE_RIGHT) * n / WARP*/;//TODO
trsm_kernels[func_idx]<<< gridDim, blockDim, 0, curStream>>> (m, n, *alpha, A, incA, B, incB, mb);
if(!_kblas_error( (cudaGetLastError()), __func__, __FILE__, __LINE__ ))
return CUBLAS_STATUS_EXECUTION_FAILED;
}else{
//error: we should not reach this case
return CUBLAS_STATUS_INTERNAL_ERROR;
}
return CUBLAS_STATUS_SUCCESS;
}
#else
template<class T>
cublasStatus_t Xtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB){
//handle with cublas
return cublasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
#endif
//==============================================================================================
template<typename T>
cublasStatus_t kblasXtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *A, int incA,
T *B, int incB)
{
T one = make_one<T>();
T mone = make_zero<T>() - one;
T mInvAlpha = mone / *alpha;
cublasStatus_t status;
cublasOperation_t noTrans = CUBLAS_OP_N;//Trans = CUBLAS_OP_T,
if( (*alpha == make_zero<T>())//TODO
|| ( (side == CUBLAS_SIDE_LEFT) && (SIMPLE_SIZE(m)) )
|| ( (side == CUBLAS_SIDE_RIGHT) && (SIMPLE_SIZE(n)) ) ){
return Xtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB );
}
else
if(side == CUBLAS_SIDE_LEFT){
int m1, m2;
if(REG_SIZE(m))
m1 = m2 = m/2;
else{
m1 = CLOSEST_REG_SIZE(m);
m2 = m-m1;
}
if(uplo == CUBLAS_FILL_MODE_UPPER){
//Left / Upper / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
alpha, A+m1+m1*incA, incA,
B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, A+m1*incA, incA,
B+m1, incB,
alpha, B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
&one, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Left / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, A+m1*incA, incA,
B, incB,
alpha, B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
&one, A+m1+m1*incA, incA,
B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}else{//uplo == KBLAS_Lower
//Left / Lower / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, A+m1, incA,
B, incB,
alpha, B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
&one, A+m1+m1*incA, incA,
B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Left / Lower / [Conj]Trans
else{//transa == KBLAS_Trans
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m2, n,
alpha, A+m1+m1*incA, incA,
B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, A+m1, incA,
B+m1, incB,
alpha, B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m1, n,
&one, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}//transa == KBLAS_Trans
}
}
else{//side == KBLAS_Right
int n1, n2;
if(REG_SIZE(n))
n1 = n2 = n/2;
else{
n1 = CLOSEST_REG_SIZE(n);
n2 = n-n1;
}
if(uplo == KBLAS_Upper){
//Right / Upper / NoTrans
if(trans == noTrans){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mone, B, incB,
A+n1*incA, incA,
alpha, B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
&one, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Right / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mInvAlpha, B+n1*incB, incB,
A+n1*incA, incA,
&one, B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}
else{
//Right / Lower / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mone, B+n1*incB, incB,
A+n1, incA,
alpha, B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
&one, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Right / Lower / [Conj]Trans
else{
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n1,
alpha, A, incA,
B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mInvAlpha, B, incB,
A+n1, incA,
&one, B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle,
side, uplo, trans, diag,
m, n2,
alpha, A+n1+n1*incA, incA,
B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}
}//side == Right
return CUBLAS_STATUS_SUCCESS;
}
//==============================================================================================
template<typename T>
cublasStatus_t kblasXtrsm(cublasHandle_t handle, cudaStream_t &strIn, cudaStream_t &strOut,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int incA, T* d_A,
T *h_B, int incB, T* d_B,
bool BIsIn, bool getBOut, bool AIsIn)
{
T one = make_one<T>();
T mone = make_zero<T>() - one;
T mInvAlpha = mone / *alpha;
cublasStatus_t status;
cublasOperation_t noTrans = CUBLAS_OP_N;//Trans = CUBLAS_OP_T,
cudaEvent_t eDataIn, eComp;
check_error( cudaEventCreateWithFlags(&eDataIn, cudaEventDisableTiming), CUBLAS_STATUS_EXECUTION_FAILED);
check_error( cudaEventCreateWithFlags(&eComp, cudaEventDisableTiming), CUBLAS_STATUS_EXECUTION_FAILED);
cudaStream_t strComp;
check_error( cublasGetStream_v2(handle, &strComp), CUBLAS_STATUS_INTERNAL_ERROR);
if( (*alpha == make_zero<T>())//TODO
|| ( (side == CUBLAS_SIDE_LEFT) && (SIMPLE_SIZE(m)) )
|| ( (side == CUBLAS_SIDE_RIGHT) && (SIMPLE_SIZE(n)) ) ){
int Am = (side == CUBLAS_SIDE_LEFT) ? m : n;
//if B is not already in, copy in B block
if(!BIsIn)
check_error( status = cublasSetMatrixAsync( m, n, sizeof(T), h_B, incB, d_B, incB, strIn ), status);
//copy in A block
if(!AIsIn)
check_error( status = cublasSetMatrixAsync( Am, Am, sizeof(T), h_A, incA, d_A, incA, strIn ), status);
//wait for data to arrive
if(!AIsIn || !BIsIn){
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if( (status = Xtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, d_A, incA,
d_B, incB ) ) != CUBLAS_STATUS_SUCCESS ) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m, n, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
}
else
if(side == CUBLAS_SIDE_LEFT){
int m1, m2;
if(REG_SIZE(m))
m1 = m2 = m/2;
else{
m1 = CLOSEST_REG_SIZE(m);
m2 = m-m1;
}
if( (!AIsIn && SIMPLE_SIZE_DATA(m)) || (!BIsIn && SIMPLE_SIZE_DATA(m)) ){
if( (!AIsIn && SIMPLE_SIZE_DATA(m)) ){
check_error( status = cublasSetMatrixAsync( m, m, sizeof(T), h_A, incA, d_A, incA, strIn), status);
AIsIn = true;
}
if( (!BIsIn && SIMPLE_SIZE_DATA(m)) ){
check_error( status = cublasSetMatrixAsync( m, n, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//wait for data to arrive
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if(uplo == CUBLAS_FILL_MODE_UPPER){
//Left / Upper / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
alpha, h_A+m1+m1*incA, incA, d_A+m1+m1*incA,
h_B+m1, incB, d_B+m1,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m2, n, sizeof(T), d_B+m1, incB, h_B+m1, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = cublasSetMatrixAsync( m1, n, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = cublasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1*incA, incA, d_A+m1*incA, incA, strIn), status);
//wait for data to arrive
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, d_A+m1*incA, incA,
d_B+m1, incB,
alpha, d_B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
&one, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Left / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m1, n, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = cublasSetMatrixAsync( m2, n, sizeof(T), h_B+m1, incB, d_B+m1, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = cublasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1*incA, incA, d_A+m1*incA, incA, strIn), status);
//wait for data to arrive
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, d_A+m1*incA, incA,
d_B, incB,
alpha, d_B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
&one, h_A+m1+m1*incA, incA, d_A+m1+m1*incA,
h_B+m1, incB, d_B+m1,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}else{//uplo == KBLAS_Lower
//Left / Lower / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m1, n, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = cublasSetMatrixAsync( m2, n, sizeof(T), h_B+m1, incB, d_B+m1, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = cublasSetMatrixAsync( m2, m1, sizeof(T), h_A+m1, incA, d_A+m1, incA, strIn), status);
//wait for data to arrive
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m2, n, m1,
&mone, d_A+m1, incA,
d_B, incB,
alpha, d_B+m1, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
&one, h_A+m1+m1*incA, incA, d_A+m1+m1*incA,
h_B+m1, incB, d_B+m1,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Left / Lower / [Conj]Trans
else{//transa == KBLAS_Trans
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m2, n,
alpha, h_A+m1+m1*incA, incA, d_A+m1+m1*incA,
h_B+m1, incB, d_B+m1,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m2, n, sizeof(T), d_B+m1, incB, h_B+m1, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = cublasSetMatrixAsync( m1, n, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = cublasSetMatrixAsync( m1, m2, sizeof(T), h_A+m1, incA, d_A+m1, incA, strIn), status);
//wait for data to arrive
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
trans, noTrans,
m1, n, m2,
&mone, d_A+m1, incA,
d_B+m1, incB,
alpha, d_B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m1, n,
&one, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}//transa == KBLAS_Trans
}
}
else{//side == KBLAS_Right
int n1, n2;
if(REG_SIZE(n))
n1 = n2 = n/2;
else{
n1 = CLOSEST_REG_SIZE(n);
n2 = n-n1;
}
if(uplo == CUBLAS_FILL_MODE_UPPER){
//Right / Upper / NoTrans
if(trans == noTrans){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m, n1, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = cublasSetMatrixAsync( m, n2, sizeof(T), h_B+n1*incB, incB, d_B+n1*incB, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = cublasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1*incA, incA, d_A+n1*incA, incA, strIn), status);
//wait for data to arrive
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mone, d_B, incB,
d_A+n1*incA, incA,
alpha, d_B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
&one, h_A+n1+n1*incA, incA, d_A+n1+n1*incA,
h_B+n1*incB, incB, d_B+n1*incB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Right / Upper / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*incA, incA, d_A+n1+n1*incA,
h_B+n1*incB, incB, d_B+n1*incB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m, n2, sizeof(T), d_B+n1*incB, incB, h_B+n1*incB, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = cublasSetMatrixAsync( m, n1, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = cublasSetMatrixAsync( n2, n1, sizeof(T), h_A+n1*incA, incA, d_A+n1*incA, incA, strIn), status);
//wait for data to arrive
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mInvAlpha, d_B+n1*incB, incB,
d_A+n1*incA, incA,
&one, d_B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}
else{
//Right / Lower / NoTrans
if(trans == CUBLAS_OP_N){
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*incA, incA, d_A+n1+n1*incA,
h_B+n1*incB, incB, d_B+n1*incB,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m, n2, sizeof(T), d_B+n1*incB, incB, h_B+n1*incB, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = cublasSetMatrixAsync( m, n1, sizeof(T), h_B, incB, d_B, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = cublasSetMatrixAsync( n2, n1, sizeof(T), h_A+n1, incA, d_A+n1, incA, strIn), status);
//wait for data to arrive
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n1, n2,
&mone, d_B+n1*incB, incB,
d_A+n1, incA,
alpha, d_B, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
&one, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
//Right / Lower / [Conj]Trans
else{
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n1,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
BIsIn, false, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
//if stream is done computing and getBOut, copy B back.
if(getBOut){
check_error( cudaEventRecord(eComp, strComp), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strOut, eComp, 0), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m, n1, sizeof(T), d_B, incB, h_B, incB, strOut), status);
}
//prepare needed data
if(!AIsIn || !BIsIn){
//if B is not already in, copy B block
if(!BIsIn){
check_error( status = cublasSetMatrixAsync( m, n2, sizeof(T), h_B+n1*incB, incB, d_B+n1*incB, incB, strIn), status);
BIsIn = true;
}
//copy in A block
if(!AIsIn)
check_error( status = cublasSetMatrixAsync( n1, n2, sizeof(T), h_A+n1, incA, d_A+n1, incA, strIn), status);
//wait for data to arrive
check_error( cudaEventRecord(eDataIn, strIn), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamWaitEvent(strComp, eDataIn, 0), CUBLAS_STATUS_INTERNAL_ERROR);
}
if((status = cublasXgemm(handle,
noTrans, trans,
m, n2, n1,
&mInvAlpha, d_B, incB,
d_A+n1, incA,
&one, d_B+n1*incB, incB
)) != CUBLAS_STATUS_SUCCESS) return status;
if((status = kblasXtrsm(handle, strIn, strOut,
side, uplo, trans, diag,
m, n2,
alpha, h_A+n1+n1*incA, incA, d_A+n1+n1*incA,
h_B+n1*incB, incB, d_B+n1*incB,
BIsIn, getBOut, AIsIn
)) != CUBLAS_STATUS_SUCCESS) return status;
}
}
}//side == Right
check_error( cudaEventDestroy( eDataIn ), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaEventDestroy( eComp ), CUBLAS_STATUS_INTERNAL_ERROR);
return CUBLAS_STATUS_SUCCESS;
}
//==============================================================================================
//#define DO_INLINE_BOUT 0
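// Host-pointer wrapper: allocates device copies of A and B, creates the input
// (and optionally output) transfer streams, runs the recursive kblasXtrsm with
// the transfers overlapped against computation, waits for the result to reach
// the host, and releases the temporary streams and device buffers.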
template<class T>
cublasStatus_t kblasXtrsm_cpu(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const T *alpha,
const T *h_A, int incA,
T *h_B, int incB){
//allocate memory on device
T *d_A, *d_B;
int Am, An, Bm, Bn;
if ( side == CUBLAS_SIDE_LEFT ) {
Am = An = m;
} else {
Am = An = n;
}
Bm = m;
Bn = n;
/*check_error( cudaHostRegister((void*)h_A, Am * An * sizeof(T), cudaHostRegisterDefault), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaHostRegister((void*)h_B, Bm * Bn * sizeof(T), cudaHostRegisterDefault), CUBLAS_STATUS_INTERNAL_ERROR);*/
cublasStatus_t status;
//*
int AsyncEngineCount, devID;
check_error( cudaGetDevice(&devID), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaDeviceGetAttribute(&AsyncEngineCount, cudaDevAttrAsyncEngineCount, devID), CUBLAS_STATUS_INTERNAL_ERROR);
bool DO_INLINE_BOUT = AsyncEngineCount > 1;
//*/
check_error( cudaMalloc( (void**)&d_A, (Am*An)*sizeof(T) ), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaMalloc( (void**)&d_B, (Bm*Bn)*sizeof(T) ), CUBLAS_STATUS_INTERNAL_ERROR);
//setup streams
cudaStream_t inStream, outStream;
check_error( cudaStreamCreateWithFlags( &inStream, cudaStreamNonBlocking), CUBLAS_STATUS_INTERNAL_ERROR );
if(DO_INLINE_BOUT)
check_error( cudaStreamCreateWithFlags( &outStream, cudaStreamNonBlocking), CUBLAS_STATUS_INTERNAL_ERROR );
//call cpu API trmm
check_error(
(status = kblasXtrsm(handle, inStream, outStream,
side, uplo, trans,diag,
m, n,
alpha, h_A, incA, d_A,
h_B, incB, d_B,
false, DO_INLINE_BOUT, false)
), status);
//sync streams
if(DO_INLINE_BOUT){
check_error( cudaStreamSynchronize( outStream ), CUBLAS_STATUS_INTERNAL_ERROR);
}else{
cudaStream_t compStream;
check_error( cublasGetStream_v2(handle, &compStream), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( cudaStreamSynchronize( compStream ), CUBLAS_STATUS_INTERNAL_ERROR);
check_error( status = cublasGetMatrixAsync( m, n, sizeof(T), d_B, incB, h_B, incB, inStream), status);
}
//revoke streams
check_error( cudaStreamDestroy( inStream ), CUBLAS_STATUS_INTERNAL_ERROR);
if(DO_INLINE_BOUT)
check_error( cudaStreamDestroy( outStream ), CUBLAS_STATUS_INTERNAL_ERROR);
/*check_error( cudaHostUnregister( (void*)h_A ), CUBLAS_STATUS_INTERNAL_ERROR );
check_error( cudaHostUnregister( (void*)h_B ), CUBLAS_STATUS_INTERNAL_ERROR );*/
//free device memory
check_error( cudaFree( d_A ), CUBLAS_STATUS_INTERNAL_ERROR );
check_error( cudaFree( d_B ), CUBLAS_STATUS_INTERNAL_ERROR );
return CUBLAS_STATUS_SUCCESS;
}
//==============================================================================================
/*extern "C" {
int kblas_strsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int incA,
float *B, int incB,
cudaStream_t stream){
check_error(cublasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_dtrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int incA,
double *B, int incB,
cudaStream_t stream){
check_error(cublasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ctrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
cuComplex alpha, const cuComplex *A, int incA,
cuComplex *B, int incB,
cudaStream_t stream){
check_error(cublasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ztrsm_async(
char side, char uplo, char trans, char diag,
int m, int n,
cuDoubleComplex alpha, const cuDoubleComplex *A, int incA,
cuDoubleComplex *B, int incB,
cudaStream_t stream){
check_error(cublasSetKernelStream(stream));
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_strsm(
char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int incA,
float *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_dtrsm(
char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int incA,
double *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ctrsm(
char side, char uplo, char trans, char diag,
int m, int n,
cuComplex alpha, const cuComplex *A, int incA,
cuComplex *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
int kblas_ztrsm(
char side, char uplo, char trans, char diag,
int m, int n,
cuDoubleComplex alpha, const cuDoubleComplex *A, int incA,
cuDoubleComplex *B, int incB){
return kblasXtrsm(
side, uplo, trans, diag,
m, n,
alpha, A, incA,
B, incB);
}
}*/
//==============================================================================================
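// Shared body for the char-flag async wrappers below: creates a temporary cuBLAS
// handle bound to the caller's stream, maps the KBLAS char flags to the cuBLAS v2
// enums, invokes kblasXtrsm, and destroys the handle again.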
#define kblasXtrsm_async_BODY { \
\
cublasHandle_t cublas_handle; \
if( cublasCreate(&cublas_handle) != CUBLAS_STATUS_SUCCESS ) return; \
if( cublasSetStream_v2(cublas_handle, stream) != CUBLAS_STATUS_SUCCESS ){ \
cublasDestroy_v2(cublas_handle); \
return; \
} \
cublasSideMode_t side_v2 = (side == KBLAS_Left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT); \
cublasFillMode_t uplo_v2 = (uplo == KBLAS_Lower ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER); \
cublasOperation_t trans_v2 = (trans == KBLAS_Trans ? CUBLAS_OP_T : CUBLAS_OP_N); \
cublasDiagType_t diag_v2 = (diag == KBLAS_Unit ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT); \
\
kblasXtrsm(cublas_handle, \
side_v2, uplo_v2, trans_v2, diag_v2, \
m, n, \
&alpha, A, lda, \
B, ldb); \
\
cublasDestroy_v2(cublas_handle); \
}
extern "C"{
void kblasStrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int lda,
float *B, int ldb,
cudaStream_t stream){
kblasXtrsm_async_BODY
}
void kblasDtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int lda,
double *B, int ldb,
cudaStream_t stream){
kblasXtrsm_async_BODY
}
void kblasCtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
cuComplex alpha, const cuComplex *A, int lda,
cuComplex *B, int ldb,
cudaStream_t stream){
kblasXtrsm_async_BODY
}
void kblasZtrsm_async(char side, char uplo, char trans, char diag,
int m, int n,
cuDoubleComplex alpha, const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb,
cudaStream_t stream){
kblasXtrsm_async_BODY
}
}
//==============================================================================================
void kblasStrsm(char side, char uplo, char trans, char diag,
int m, int n,
float alpha, const float *A, int lda,
float *B, int ldb){
kblasStrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasDtrsm(char side, char uplo, char trans, char diag,
int m, int n,
double alpha, const double *A, int lda,
double *B, int ldb){
kblasDtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasCtrsm(char side, char uplo, char trans, char diag,
int m, int n,
cuComplex alpha, const cuComplex *A, int lda,
cuComplex *B, int ldb){
kblasCtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
void kblasZtrsm(char side, char uplo, char trans, char diag,
int m, int n,
cuDoubleComplex alpha, const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb){
kblasZtrsm_async(side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb,
0);
}
//==============================================================================================
cublasStatus_t kblasStrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblasDtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblasCtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuComplex *alpha,
const cuComplex *A, int lda,
cuComplex *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblasZtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuDoubleComplex *alpha,
const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb){
return kblasXtrsm(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
//==============================================================================================
cublasStatus_t kblas_strsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const float *alpha,
const float *A, int lda,
float *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblas_dtrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const double *alpha,
const double *A, int lda,
double *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblas_ctrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuComplex *alpha,
const cuComplex *A, int lda,
cuComplex *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
cublasStatus_t kblas_ztrsm(cublasHandle_t handle,
cublasSideMode_t side, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int m, int n,
const cuDoubleComplex *alpha,
const cuDoubleComplex *A, int lda,
cuDoubleComplex *B, int ldb){
return kblasXtrsm_cpu(handle,
side, uplo, trans, diag,
m, n,
alpha, A, lda,
B, ldb);
}
|
791231208546495ec7dc510363817ee9ccdf4b40.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cstdlib>
#include "Random.h"
#define BLOCK_SIZE 512
#define SECTION_SIZE (2*BLOCK_SIZE)
#define INPUT_SIZE 4569
#define OUTPUT_SIZE INPUT_SIZE//((INPUT_SIZE-1)/(BLOCK_SIZE*2)+1)
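//Work-efficient (Brent-Kung style) inclusive scan of one SECTION_SIZE-element
//section in shared memory; each block also writes its section total to sum[]
//for the second-level scan.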
__global__ void list_scan_kernel(float *input, float *output, float *sum, int len)
{
__shared__ float XY[SECTION_SIZE];
unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < len){
XY[threadIdx.x] = input[i];
}else{
XY[threadIdx.x] = 0.0f; //zero-fill out-of-range slots so the scan does not read uninitialized shared memory
}
//Reduction
for( int stride =1; stride < blockDim.x; stride *=2){
__syncthreads();
int index = (threadIdx.x+1)*stride*2-1;
if(index<blockDim.x)
XY[index] += XY[index-stride];
}
//Post reduction
for(int stride = SECTION_SIZE/4; stride>0; stride /= 2){
__syncthreads();
int index = (threadIdx.x+1)*stride*2-1;
if(index+stride < blockDim.x){
XY[index+stride] += XY[index];
}
}
__syncthreads();
if(i < len)
output[i] = XY[threadIdx.x]; //per-section scan results (guarded against out-of-range writes)
__syncthreads();
if(threadIdx.x ==SECTION_SIZE-1){
sum[blockIdx.x] = XY[SECTION_SIZE-1];
}
}
__global__ void list_scan_kernel2(float *input, float *intermediate, int len)
{
__shared__ float XY[SECTION_SIZE];
unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < len){
XY[threadIdx.x] = input[i];
}else{
XY[threadIdx.x] = 0.0f; //zero-fill out-of-range slots so the scan does not read uninitialized shared memory
}
//Reduction
for( int stride =1; stride < blockDim.x; stride *=2){
__syncthreads();
int index = (threadIdx.x+1)*stride*2-1;
if(index<blockDim.x)
XY[index] += XY[index-stride];
}
//Post reduction
for(int stride = SECTION_SIZE/4; stride>0; stride /= 2){
__syncthreads();
int index = (threadIdx.x+1)*stride*2-1;
if(index+stride < blockDim.x){
XY[index+stride] += XY[index];
}
}
__syncthreads();
if(i < len)
intermediate[i] = XY[threadIdx.x]; //per-section scan results (guarded against out-of-range writes)
}
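//Adds the scanned block totals to the elements of the following section so the
//per-section scans combine into one global inclusive scan.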
__global__ void consolidation_kernel(float *input, float *output)
{
int i = blockIdx.x;
int t = (blockIdx.x+1) * blockDim.x + threadIdx.x;
if(i<(OUTPUT_SIZE-1)/(2*BLOCK_SIZE) && t<OUTPUT_SIZE)
output[t] += input[i];
}
int main()
{
float *hInput, *hOutput;
float *dInput, *dOutput, *dSum;
//Random rr;
hInput = (float*)malloc(INPUT_SIZE*sizeof(float));
hOutput = (float*)malloc(INPUT_SIZE*sizeof(float));
for(int i=0; i<INPUT_SIZE; i++)
hInput[i] = float(i+1);//rr.random_integer(1,10));
hipMalloc((void**)&dInput, INPUT_SIZE * sizeof(float));
hipMalloc((void**)&dOutput, OUTPUT_SIZE * sizeof(float));
hipMalloc((void**)&dSum, ((OUTPUT_SIZE-1)/(2*BLOCK_SIZE)+1)*sizeof(float));
hipMemcpy(dInput, hInput, INPUT_SIZE * sizeof(float),hipMemcpyHostToDevice);
dim3 dimGrid((OUTPUT_SIZE-1)/SECTION_SIZE+1, 1,1);
dim3 dimBlock(SECTION_SIZE,1,1);
unsigned int len = INPUT_SIZE;
unsigned int sum_len = (OUTPUT_SIZE-1)/(2*BLOCK_SIZE)+1;
hipLaunchKernelGGL(( list_scan_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dInput, dOutput, dSum, len);
hipLaunchKernelGGL(( list_scan_kernel2), dim3(dimGrid), dim3(dimBlock), 0, 0, dSum, dSum, sum_len);
hipLaunchKernelGGL(( consolidation_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, dSum,dOutput);
hipMemcpy(hOutput, dOutput, OUTPUT_SIZE*sizeof(float),hipMemcpyDeviceToHost);
printf("The final result of list scan is:\n") ;
for(int i=0; i<OUTPUT_SIZE; i++){
printf("%3.0f,",hOutput[i]);
}
hipFree(dInput);
hipFree(dOutput);
hipFree(dSum);
free(hInput);
free(hOutput);
return 0;
}
|
791231208546495ec7dc510363817ee9ccdf4b40.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cstdlib>
#include "Random.h"
#define BLOCK_SIZE 512
#define SECTION_SIZE (2*BLOCK_SIZE)
#define INPUT_SIZE 4569
#define OUTPUT_SIZE INPUT_SIZE//((INPUT_SIZE-1)/(BLOCK_SIZE*2)+1)
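//Work-efficient (Brent-Kung style) inclusive scan of one SECTION_SIZE-element
//section in shared memory; each block also writes its section total to sum[]
//for the second-level scan.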
__global__ void list_scan_kernel(float *input, float *output, float *sum, int len)
{
__shared__ float XY[SECTION_SIZE];
unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < len){
XY[threadIdx.x] = input[i];
}else{
XY[threadIdx.x] = 0.0f; //zero-fill out-of-range slots so the scan does not read uninitialized shared memory
}
//Reduction
for( int stride =1; stride < blockDim.x; stride *=2){
__syncthreads();
int index = (threadIdx.x+1)*stride*2-1;
if(index<blockDim.x)
XY[index] += XY[index-stride];
}
//Post reduction
for(int stride = SECTION_SIZE/4; stride>0; stride /= 2){
__syncthreads();
int index = (threadIdx.x+1)*stride*2-1;
if(index+stride < blockDim.x){
XY[index+stride] += XY[index];
}
}
__syncthreads();
if(i < len)
output[i] = XY[threadIdx.x]; //per-section scan results (guarded against out-of-range writes)
__syncthreads();
if(threadIdx.x ==SECTION_SIZE-1){
sum[blockIdx.x] = XY[SECTION_SIZE-1];
}
}
__global__ void list_scan_kernel2(float *input, float *intermediate, int len)
{
__shared__ float XY[SECTION_SIZE];
unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i < len){
XY[threadIdx.x] = input[i];
}else{
XY[threadIdx.x] = 0.0f; //zero-fill out-of-range slots so the scan does not read uninitialized shared memory
}
//Reduction
for( int stride =1; stride < blockDim.x; stride *=2){
__syncthreads();
int index = (threadIdx.x+1)*stride*2-1;
if(index<blockDim.x)
XY[index] += XY[index-stride];
}
//Post reduction
for(int stride = SECTION_SIZE/4; stride>0; stride /= 2){
__syncthreads();
int index = (threadIdx.x+1)*stride*2-1;
if(index+stride < blockDim.x){
XY[index+stride] += XY[index];
}
}
__syncthreads();
if(i < len)
intermediate[i] = XY[threadIdx.x]; //per-section scan results (guarded against out-of-range writes)
}
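//Adds the scanned block totals to the elements of the following section so the
//per-section scans combine into one global inclusive scan.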
__global__ void consolidation_kernel(float *input, float *output)
{
int i = blockIdx.x;
int t = (blockIdx.x+1) * blockDim.x + threadIdx.x;
if(i<(OUTPUT_SIZE-1)/(2*BLOCK_SIZE) && t<OUTPUT_SIZE)
output[t] += input[i];
}
int main()
{
float *hInput, *hOutput;
float *dInput, *dOutput, *dSum;
//Random rr;
hInput = (float*)malloc(INPUT_SIZE*sizeof(float));
hOutput = (float*)malloc(INPUT_SIZE*sizeof(float));
for(int i=0; i<INPUT_SIZE; i++)
hInput[i] = float(i+1);//rr.random_integer(1,10));
cudaMalloc((void**)&dInput, INPUT_SIZE * sizeof(float));
cudaMalloc((void**)&dOutput, OUTPUT_SIZE * sizeof(float));
cudaMalloc((void**)&dSum, ((OUTPUT_SIZE-1)/(2*BLOCK_SIZE)+1)*sizeof(float));
cudaMemcpy(dInput, hInput, INPUT_SIZE * sizeof(float),cudaMemcpyHostToDevice);
dim3 dimGrid((OUTPUT_SIZE-1)/SECTION_SIZE+1, 1,1);
dim3 dimBlock(SECTION_SIZE,1,1);
unsigned int len = INPUT_SIZE;
unsigned int sum_len = (OUTPUT_SIZE-1)/(2*BLOCK_SIZE)+1;
list_scan_kernel<<<dimGrid, dimBlock>>>(dInput, dOutput, dSum, len);
list_scan_kernel2<<<dimGrid, dimBlock>>>(dSum, dSum, sum_len);
consolidation_kernel<<<dimGrid,dimBlock>>>(dSum,dOutput);
cudaMemcpy(hOutput, dOutput, OUTPUT_SIZE*sizeof(float),cudaMemcpyDeviceToHost);
printf("The final result of list scan is:\n") ;
for(int i=0; i<OUTPUT_SIZE; i++){
printf("%3.0f,",hOutput[i]);
}
cudaFree(dInput);
cudaFree(dOutput);
cudaFree(dSum);
free(hInput);
free(hOutput);
return 0;
}
|
c7e72039db0d5d5ca7a475c8264bd10f0287e74a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "DeviceCalculations.cuh"
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <hiprand/hiprand.h>
struct inside_circle
{
__device__ int8_t operator()(const thrust::tuple<float, float>& p) const
{
return (((thrust::get<0>(p) - 0.5) * (thrust::get<0>(p) - 0.5) + (thrust::get<1>(p) - 0.5) * (thrust::get<1>(p) - 0.5)) < 0.25) ? 1 : 0;
}
};
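// Generates numberOfPoints uniform points in the unit square and counts how many
// fall inside the circle of radius 0.5 centred at (0.5, 0.5); the caller can
// estimate pi as 4.0 * count / numberOfPoints.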
__host__
size_t calc_on_device(size_t numberOfPoints)
{
thrust::device_vector<float> pointsX(numberOfPoints);
thrust::device_vector<float> pointsY(numberOfPoints);
// Generate random points using cuRAND
hiprandGenerator_t generator;
hiprandCreateGenerator(&generator, /*HIPRAND_RNG_QUASI_DEFAULT*/HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandGenerateUniform(generator, thrust::raw_pointer_cast(pointsX.data()), numberOfPoints);
hiprandGenerateUniform(generator, thrust::raw_pointer_cast(pointsY.data()), numberOfPoints);
// Count points inside circle using reduction from Thrust
thrust::device_vector<int8_t> insideCircle(numberOfPoints);
auto first = thrust::make_zip_iterator(thrust::make_tuple(pointsX.begin(), pointsY.begin()));
auto last = thrust::make_zip_iterator(thrust::make_tuple(pointsX.end() , pointsY.end() ));
thrust::transform(first, last, insideCircle.begin(), inside_circle());
size_t total = thrust::reduce(insideCircle.begin(), insideCircle.end(), (size_t)0, thrust::plus<size_t>());
hiprandDestroyGenerator(generator); //release the generator so it is not leaked
return total;
}
|
c7e72039db0d5d5ca7a475c8264bd10f0287e74a.cu
|
#include "DeviceCalculations.cuh"
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <curand.h>
struct inside_circle
{
__device__ int8_t operator()(const thrust::tuple<float, float>& p) const
{
return (((thrust::get<0>(p) - 0.5) * (thrust::get<0>(p) - 0.5) + (thrust::get<1>(p) - 0.5) * (thrust::get<1>(p) - 0.5)) < 0.25) ? 1 : 0;
}
};
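// Generates numberOfPoints uniform points in the unit square and counts how many
// fall inside the circle of radius 0.5 centred at (0.5, 0.5); the caller can
// estimate pi as 4.0 * count / numberOfPoints.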
__host__
size_t calc_on_device(size_t numberOfPoints)
{
thrust::device_vector<float> pointsX(numberOfPoints);
thrust::device_vector<float> pointsY(numberOfPoints);
// Generate random points using cuRAND
curandGenerator_t generator;
curandCreateGenerator(&generator, /*CURAND_RNG_QUASI_DEFAULT*/CURAND_RNG_PSEUDO_DEFAULT);
curandGenerateUniform(generator, thrust::raw_pointer_cast(pointsX.data()), numberOfPoints);
curandGenerateUniform(generator, thrust::raw_pointer_cast(pointsY.data()), numberOfPoints);
// Count points inside circle using reduction from Thrust
thrust::device_vector<int8_t> insideCircle(numberOfPoints);
auto first = thrust::make_zip_iterator(thrust::make_tuple(pointsX.begin(), pointsY.begin()));
auto last = thrust::make_zip_iterator(thrust::make_tuple(pointsX.end() , pointsY.end() ));
thrust::transform(first, last, insideCircle.begin(), inside_circle());
size_t total = thrust::reduce(insideCircle.begin(), insideCircle.end(), (size_t)0, thrust::plus<size_t>());
curandDestroyGenerator(generator); //release the generator so it is not leaked
return total;
}
|
5f66217e26e1671c137f0f7a99fc8e168234e726.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
#define TILE_WIDTH 16
__global__ void MatrixMulKernel(int m, int n, int k, float *A, float *B, float *C)
{
//Allocate shared memory; one copy lives in each block
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
//Shorthand for the thread coordinates; wherever these six values appear below is where the parallelism happens.
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
//Determine the row and column of the result matrix handled by this thread
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
//Temporary accumulator
float Cvalue = 0;
//Loop over the tiles of A and B, computing the result matrix phase by phase
for (int t = 0; t < (n - 1) / TILE_WIDTH + 1; ++t)
{
//Load the tiled A and B blocks into shared memory; each thread loads the A/B elements corresponding to its C element
if (Row < m && t * TILE_WIDTH + tx < n) //boundary check so matrices of arbitrary size can be multiplied (optional)
//ds_A[tx][ty] = A[t*TILE_WIDTH + tx][Row];
ds_A[tx][ty] = A[Row * n + t * TILE_WIDTH + tx]; //load the tile in a coalesced manner
else
ds_A[tx][ty] = 0.0;
if (t * TILE_WIDTH + ty < n && Col < k)
//ds_B[tx][ty] = B[Col][t*TILE_WIDTH + ty];
ds_B[tx][ty] = B[(t * TILE_WIDTH + ty) * k + Col];
else
ds_B[tx][ty] = 0.0;
//Make sure every element of the tile has been loaded
__syncthreads();
for (int i = 0; i < TILE_WIDTH; ++i)
Cvalue += ds_A[i][ty] * ds_B[tx][i]; //read the operands from shared memory
//Make sure all threads have finished this phase before starting the next one
__syncthreads();
if (Row < m && Col < k)
C[Row * k + Col] = Cvalue;
}
}
int main()
{
//The matrices are stored here as 1-D arrays in row-major order
int m = 4096, n = 4096, k = 4096;
float *A = (float *)malloc(m * n * sizeof(float));
float *B = (float *)malloc(n * k * sizeof(float));
float *C = (float *)malloc(m * k * sizeof(float));
float *result = (float *)malloc(m * k * sizeof(float));
for (int i = 0; i < m; ++i)
for (int j = 0; j < m; ++j)
{
A[i * m + j] = (i - 0.1 * j + 1) / (i + j + 1);
B[i * m + j] = (j - 0.2 * i + 1) * (i + j + 1) / (i * i + j * j + 1);
C[i * m + j] = 0.0;
}
//Allocate device memory
int size = sizeof(float);
float *d_a;
float *d_b;
float *d_c;
hipMalloc((void **)&d_a, m * n * size);
hipMalloc((void **)&d_b, n * k * size);
hipMalloc((void **)&d_c, m * k * size);
// GPU time calculate start
hipEvent_t start, stop;
float elapsedTime = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//Copy the data from host to device
hipMemcpy(d_a, A, size * m * n, hipMemcpyHostToDevice);
hipMemcpy(d_b, B, size * n * k, hipMemcpyHostToDevice);
hipMemcpy(d_c, C, size * m * k, hipMemcpyHostToDevice);
//Configure the grid layout
dim3 dimGrid((k - 1) / TILE_WIDTH + 1, (m - 1) / TILE_WIDTH + 1, 1); //round up
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
//Launch the kernel
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, m, n, k, d_a, d_b, d_c);
//Copy the result back to the host
hipMemcpy(C, d_c, size * m * k, hipMemcpyDeviceToHost);
// GPU time calculate end
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
cout << "GPU time: " << elapsedTime << " ms" << endl;
//Compute the reference result on the CPU
/*clock_t begin = clock();
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < m; ++j)
{
float sum = 0;
for (int k = 0; k < m; ++k)
sum += A[i * m + k] * B[k * m + j];
result[i * m + j] = sum;
}
}
clock_t end = clock();
cout << "CPU time: " << (end - begin) * 1000 / CLOCKS_PER_SEC << " ms" << endl;
//Compare the results
bool flag = true;
for (int i = 0; i < m * k; ++i)
{
if (abs(result[i] - C[i]) > 0.001)
{
flag = false;
cout << result[i] << "-" << C[i] << endl;
}
}
if (flag)
cout << "Correct!" << endl;
else
cout << "Error!" << endl;*/
//Free device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(A);
free(B);
free(C);
free(result);
return 0;
}
|
5f66217e26e1671c137f0f7a99fc8e168234e726.cu
|
#include <iostream>
using namespace std;
#define TILE_WIDTH 16
__global__ void MatrixMulKernel(int m, int n, int k, float *A, float *B, float *C)
{
//Allocate shared memory; one copy lives in each block
__shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
//Shorthand for the thread coordinates; wherever these six values appear below is where the parallelism happens.
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
//Determine the row and column of the result matrix handled by this thread
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
//Temporary accumulator
float Cvalue = 0;
//Loop over the tiles of A and B, computing the result matrix phase by phase
for (int t = 0; t < (n - 1) / TILE_WIDTH + 1; ++t)
{
//Load the tiled A and B blocks into shared memory; each thread loads the A/B elements corresponding to its C element
if (Row < m && t * TILE_WIDTH + tx < n) //boundary check so matrices of arbitrary size can be multiplied (optional)
//ds_A[tx][ty] = A[t*TILE_WIDTH + tx][Row];
ds_A[tx][ty] = A[Row * n + t * TILE_WIDTH + tx]; //load the tile in a coalesced manner
else
ds_A[tx][ty] = 0.0;
if (t * TILE_WIDTH + ty < n && Col < k)
//ds_B[tx][ty] = B[Col][t*TILE_WIDTH + ty];
ds_B[tx][ty] = B[(t * TILE_WIDTH + ty) * k + Col];
else
ds_B[tx][ty] = 0.0;
//Make sure every element of the tile has been loaded
__syncthreads();
for (int i = 0; i < TILE_WIDTH; ++i)
Cvalue += ds_A[i][ty] * ds_B[tx][i]; //read the operands from shared memory
//Make sure all threads have finished this phase before starting the next one
__syncthreads();
if (Row < m && Col < k)
C[Row * k + Col] = Cvalue;
}
}
int main()
{
//The matrices are stored here as 1-D arrays in row-major order
int m = 4096, n = 4096, k = 4096;
float *A = (float *)malloc(m * n * sizeof(float));
float *B = (float *)malloc(n * k * sizeof(float));
float *C = (float *)malloc(m * k * sizeof(float));
float *result = (float *)malloc(m * k * sizeof(float));
for (int i = 0; i < m; ++i)
for (int j = 0; j < m; ++j)
{
A[i * m + j] = (i - 0.1 * j + 1) / (i + j + 1);
B[i * m + j] = (j - 0.2 * i + 1) * (i + j + 1) / (i * i + j * j + 1);
C[i * m + j] = 0.0;
}
//Allocate device memory
int size = sizeof(float);
float *d_a;
float *d_b;
float *d_c;
cudaMalloc((void **)&d_a, m * n * size);
cudaMalloc((void **)&d_b, n * k * size);
cudaMalloc((void **)&d_c, m * k * size);
// GPU time calculate start
cudaEvent_t start, stop;
float elapsedTime = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//Copy the data from host to device
cudaMemcpy(d_a, A, size * m * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, B, size * n * k, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, C, size * m * k, cudaMemcpyHostToDevice);
//Configure the grid layout
dim3 dimGrid((k - 1) / TILE_WIDTH + 1, (m - 1) / TILE_WIDTH + 1, 1); //round up
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
//Launch the kernel
MatrixMulKernel<<<dimGrid, dimBlock>>>(m, n, k, d_a, d_b, d_c);
//Copy the result back to the host
cudaMemcpy(C, d_c, size * m * k, cudaMemcpyDeviceToHost);
// GPU time calculate end
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "GPU time: " << elapsedTime << " ms" << endl;
//Compute the reference result on the CPU
/*clock_t begin = clock();
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < m; ++j)
{
float sum = 0;
for (int k = 0; k < m; ++k)
sum += A[i * m + k] * B[k * m + j];
result[i * m + j] = sum;
}
}
clock_t end = clock();
cout << "CPU time: " << (end - begin) * 1000 / CLOCKS_PER_SEC << " ms" << endl;
//Compare the results
bool flag = true;
for (int i = 0; i < m * k; ++i)
{
if (abs(result[i] - C[i]) > 0.001)
{
flag = false;
cout << result[i] << "-" << C[i] << endl;
}
}
if (flag)
cout << "Correct!" << endl;
else
cout << "Error!" << endl;*/
//Free device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(A);
free(B);
free(C);
free(result);
return 0;
}
|
f4de01b79a2ba05eb638bbea046d8415f0dd2fab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "test.h"
__global__ void plus(float* r) {
r[threadIdx.x] = 10;
}
void Test::TestTensor_GPU() {
std::vector<int> shape = { 3,3 };
Tensor<float> t1(shape);
CHECK_EQ(t1.size(), 9);
const int size = t1.size();
//change the gpu data, and check the cpu data(equal)
float* gpu_mutable_data = t1.mutable_gpu_data();
hipLaunchKernelGGL(( plus) , dim3(1), dim3(size) , 0, 0, gpu_mutable_data);
CUDA_CHECK(hipDeviceSynchronize());
const float* cpu_data = t1.cpu_data();
for (int i = 0; i < size; ++i) {
CHECK_EQ(cpu_data[i], 10);
}
}
|
f4de01b79a2ba05eb638bbea046d8415f0dd2fab.cu
|
#include "test.h"
__global__ void plus(float* r) {
r[threadIdx.x] = 10;
}
void Test::TestTensor_GPU() {
std::vector<int> shape = { 3,3 };
Tensor<float> t1(shape);
CHECK_EQ(t1.size(), 9);
const int size = t1.size();
//change the gpu data, and check the cpu data(equal)
float* gpu_mutable_data = t1.mutable_gpu_data();
plus <<<1, size >>> (gpu_mutable_data);
CUDA_CHECK(cudaDeviceSynchronize());
const float* cpu_data = t1.cpu_data();
for (int i = 0; i < size; ++i) {
CHECK_EQ(cpu_data[i], 10);
}
}
|
10c5bb076a7d2a82a3531c7911b1c79c884d1116.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_3d_layer_updater_cuda_kepler.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_texture.h"
#include "packed_config.h"
#include "space_filling_curve.h"
#include "../convolution_layer.h"
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
namespace nnforge
{
namespace cuda
{
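// Forward convolution: each thread accumulates a BLOCK_SIZE-wide strip of outputs
// for up to FEATURE_MAP_BLOCK_SIZE output feature maps, reading inputs and weights
// through texture objects; when the input feature maps are processed in several
// groups the partial sums are combined with atomicAdd.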
template<int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_3d_tex_upd_kernel_kepler(
float * __restrict output,
hipTextureObject_t input_tex,
hipTextureObject_t weights_tex,
const float * __restrict biases,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int base_input_feature_map_id = conf.get_val(4);
int weight_count_per_output_feature_map = window_depth * window_height * window_width * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x + texture_offset;
int weights_offset = ((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_depth * window_height * window_width;
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - window_width;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
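// Same as the kernel above, but with the window width fixed at compile time
// (template parameter WINDOW_WIDTH) so the innermost x loop can be fully unrolled.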
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_3d_tex_exact_upd_kernel_kepler(
float * __restrict output,
hipTextureObject_t input_tex,
hipTextureObject_t weights_tex,
const float * __restrict biases,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int base_input_feature_map_id = conf.get_val(4);
int weight_count_per_output_feature_map = window_depth * window_height * WINDOW_WIDTH * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x + texture_offset;
int weights_offset = ((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_depth * window_height * WINDOW_WIDTH;
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - WINDOW_WIDTH;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
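// Bias-gradient update: each block strides over the output errors of one
// (entry, output feature map) pair, reduces the per-thread sums with warp
// shuffles, and the first lane of each warp atomically adds its partial
// sum * learning_rate to the bias.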
__global__ void convolution_3d_update_biases_upd_kernel_kepler(
float * __restrict biases,
const float * __restrict output_errors,
const float * __restrict learning_rate,
int output_feature_map_count,
int output_elem_count_per_feature_map,
int min_iteration_count)
{
int thread_id = threadIdx.x;
int output_feature_map_id = blockIdx.y;
int entry_id = blockIdx.z;
int threadblock_size = blockDim.x;
float sum = 0.0F;
const float * current_error = output_errors + (entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map;
int current_output_neuron_id = thread_id;
for(int i = 0; i < min_iteration_count; ++i)
{
sum += current_error[current_output_neuron_id];
current_output_neuron_id += threadblock_size;
}
if (current_output_neuron_id < output_elem_count_per_feature_map)
sum += current_error[current_output_neuron_id];
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
sum += __shfl_down(sum, tx);
}
if (lane_id == 0)
{
int offset = entry_id * output_feature_map_count + output_feature_map_id;
float current_learning_rate_val = learning_rate[offset];
atomicAdd(biases + offset, sum * current_learning_rate_val);
}
}
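// Backward data pass: correlates the output errors with the weights to produce
// input errors; each thread handles a BLOCK_SIZE-wide strip for up to
// FEATURE_MAP_BLOCK_SIZE input feature maps, using atomicAdd when the output
// feature maps are processed in several groups.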
template<int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_3d_deriviative_tex_upd_kernel_kepler(
float * __restrict input_errors,
hipTextureObject_t output_tex,
hipTextureObject_t weights_tex,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int input_feature_map_id = conf.get_val(3);
int base_output_feature_map_id = conf.get_val(4);
int weight_count_per_input_feature_map = window_depth * window_height * window_width;
int output_elem_id = (((entry_id * output_feature_map_count + base_output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int weights_offset = ((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * weight_count_per_input_feature_map;
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
bool b_fit_x = b_fit_y && (i > min_x_exclusive) && (i <= max_x_inclusive);
if (b_fit_x)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
weights_offset++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit_x = b_fit_y && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit_x)
{
float inp = tex1Dfetch<float>(output_tex, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
}
}
weights_offset++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
weights_offset += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
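// Same as the kernel above, but with the window width fixed at compile time
// (template parameter WINDOW_WIDTH) so the inner loops can be fully unrolled.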
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_3d_deriviative_tex_exact_upd_kernel_kepler(
float * __restrict input_errors,
hipTextureObject_t output_tex,
hipTextureObject_t weights_tex,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int input_feature_map_id = conf.get_val(3);
int base_output_feature_map_id = conf.get_val(4);
int weight_count_per_input_feature_map = window_depth * window_height * WINDOW_WIDTH;
int output_elem_id = (((entry_id * output_feature_map_count + base_output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int weights_offset = ((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * weight_count_per_input_feature_map;
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
unsigned int mask = 0;
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit_x = b_fit_y && (((1 << i) & mask) != 0);
if (b_fit_x)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
weights_offset++;
}
output_elem_id -= output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
weights_offset += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
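// Weight update: each thread sweeps the output errors against the input for
// WINDOW_WIDTH_LOCAL consecutive weights of up to FEATURE_MAP_BLOCK_SIZE output
// feature maps, then scales the accumulated sums by the per-weight learning rate
// and adds them to the weights.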
template<bool single_output_z_group>
__global__ void convolution_3d_update_weights_upd_kernel_kepler(
float * __restrict weights,
hipTextureObject_t input_tex,
hipTextureObject_t output_tex,
const float * __restrict learning_rate,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_z_group_count,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int weight_x = (blockIdx.x * blockDim.x + threadIdx.x) * WINDOW_WIDTH_LOCAL;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (packed_config_id < packed_config_count) && (entry_id < entry_count) && (weight_x < window_width);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int output_z_start_id = conf.get_val(4);
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
int output_elem_id = (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z_start_id) * output_height) * output_width;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_depth + weight_z + output_z_start_id) * input_height + weight_y) * input_width + texture_offset + weight_x;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
for(int output_z = output_z_start_id; output_z < output_depth; output_z += output_z_group_count)
{
for(int output_y = 0; output_y < output_height; output_y++)
{
float input_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_buf[i] = tex1Dfetch<float>(input_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch<float>(output_tex, output_elem_id + output_neuron_count_per_feature_map * i);
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch<float>(input_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_buf[j];
output_elem_id++;
input_elem_id++;
}
input_elem_id += window_width - WINDOW_WIDTH_LOCAL;
}
output_elem_id += output_height * output_width * (output_z_group_count - 1);
input_elem_id += input_height * input_width * (output_z_group_count - 1) + (input_width * (window_height - 1));
}
int offset = ((((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * window_width;
float * cur_weights = weights + offset;
const float * cur_learning_rate = learning_rate + offset;
if (single_output_z_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH_LOCAL + j] * cur_learning_rate[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j] * cur_learning_rate[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
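	// Descriptive comment (added): same weight update specialized for a compile-time
	// window width; one thread covers the whole window row, so the inner x loops are
	// fully unrolled and no weight_x blocking is required.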
template<int WINDOW_WIDTH, bool single_output_z_group>
__global__ void convolution_3d_update_weights_exact_upd_kernel_kepler(
float * __restrict weights,
hipTextureObject_t input_tex,
hipTextureObject_t output_tex,
const float * __restrict learning_rate,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_z_group_count,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (packed_config_id < packed_config_count) && (entry_id < entry_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int output_z_start_id = conf.get_val(4);
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
int output_elem_id = (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z_start_id) * output_height) * output_width;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_depth + weight_z + output_z_start_id) * input_height + weight_y) * input_width + texture_offset;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
for(int output_z = output_z_start_id; output_z < output_depth; output_z += output_z_group_count)
{
for(int output_y = 0; output_y < output_height; output_y++)
{
float input_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_buf[i] = tex1Dfetch<float>(input_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch<float>(output_tex, output_elem_id + output_neuron_count_per_feature_map * i);
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH - 1] = tex1Dfetch<float>(input_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_buf[j];
output_elem_id++;
input_elem_id++;
}
}
output_elem_id += output_height * output_width * (output_z_group_count - 1);
input_elem_id += input_height * input_width * (output_z_group_count - 1) + (input_width * (window_height - 1));
}
int offset = ((((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * WINDOW_WIDTH;
float * cur_weights = weights + offset;
const float * cur_learning_rate = learning_rate + offset;
if (single_output_z_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH + j] * cur_learning_rate[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j] * cur_learning_rate[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
convolution_3d_layer_updater_cuda_kepler::convolution_3d_layer_updater_cuda_kepler()
{
}
convolution_3d_layer_updater_cuda_kepler::~convolution_3d_layer_updater_cuda_kepler()
{
}
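// The kernels above are templated on window width and block size; the macros below
// expand to switch statements that select the matching instantiation at run time.
// Exact-width kernels are used while window_sizes[0] <= MAX_WINDOW_WIDTH, otherwise
// the generic kernels are launched.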
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
#define launch_exact_kernel_const_const(window_width_const, block_size_const, single_input_feature_map_group) \
hipLaunchKernelGGL(( convolution_3d_tex_exact_upd_kernel_kepler<window_width_const,block_size_const,single_input_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, input_tex, weights_tex, *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, packed_config_count);
#define launch_exact_kernel_const(window_width, block_size_const, single_input_feature_map_group) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const, single_input_feature_map_group); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const, single_input_feature_map_group); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const, single_input_feature_map_group); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const, single_input_feature_map_group); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const, single_input_feature_map_group); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const, single_input_feature_map_group); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const, single_input_feature_map_group); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const, single_input_feature_map_group); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const, single_input_feature_map_group); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const, single_input_feature_map_group); \
break; \
};
#define launch_exact_kernel(window_width, block_size, single_input_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1, single_input_feature_map_group); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2, single_input_feature_map_group); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3, single_input_feature_map_group); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4, single_input_feature_map_group); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5, single_input_feature_map_group); \
break; \
};
#define launch_kernel_const(block_size_const, single_input_feature_map_group) \
hipLaunchKernelGGL(( convolution_3d_tex_upd_kernel_kepler<block_size_const,single_input_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, input_tex, weights_tex, *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, packed_config_count);
#define launch_kernel(block_size, single_input_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1, single_input_feature_map_group); \
break; \
case 2: \
launch_kernel_const(2, single_input_feature_map_group); \
break; \
case 3: \
launch_kernel_const(3, single_input_feature_map_group); \
break; \
case 4: \
launch_kernel_const(4, single_input_feature_map_group); \
break; \
case 5: \
launch_kernel_const(5, single_input_feature_map_group); \
break; \
};
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const, single_output_feature_map_group) \
hipLaunchKernelGGL(( convolution_3d_deriviative_tex_exact_upd_kernel_kepler<window_width_const,block_size_const,single_output_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, output_tex, weights_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, packed_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const, single_output_feature_map_group) \
switch (window_width) \
{ \
case 1: \
launch_backprop_exact_kernel_const_const(1, block_size_const, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_exact_kernel_const_const(2, block_size_const, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_exact_kernel_const_const(3, block_size_const, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_exact_kernel_const_const(4, block_size_const, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_exact_kernel_const_const(5, block_size_const, single_output_feature_map_group); \
break; \
case 6: \
launch_backprop_exact_kernel_const_const(6, block_size_const, single_output_feature_map_group); \
break; \
case 7: \
launch_backprop_exact_kernel_const_const(7, block_size_const, single_output_feature_map_group); \
break; \
case 8: \
launch_backprop_exact_kernel_const_const(8, block_size_const, single_output_feature_map_group); \
break; \
case 9: \
launch_backprop_exact_kernel_const_const(9, block_size_const, single_output_feature_map_group); \
break; \
case 10: \
launch_backprop_exact_kernel_const_const(10, block_size_const, single_output_feature_map_group); \
break; \
};
#define launch_backprop_exact_kernel(window_width, block_size, single_output_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_backprop_exact_kernel_const(window_width, 1, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_exact_kernel_const(window_width, 2, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_exact_kernel_const(window_width, 3, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_exact_kernel_const(window_width, 4, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_exact_kernel_const(window_width, 5, single_output_feature_map_group); \
break; \
};
#define launch_backprop_kernel_const(block_size_const, single_output_feature_map_group) \
hipLaunchKernelGGL(( convolution_3d_deriviative_tex_upd_kernel_kepler<block_size_const,single_output_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, output_tex, weights_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, packed_config_count);
#define launch_backprop_kernel(block_size, single_output_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_backprop_kernel_const(1, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_kernel_const(2, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_kernel_const(3, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_kernel_const(4, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_kernel_const(5, single_output_feature_map_group); \
break; \
};
#define launch_update_weights_exact_kernel_const(window_width_const, single_output_z_group_const) \
hipLaunchKernelGGL(( convolution_3d_update_weights_exact_upd_kernel_kepler<window_width_const, single_output_z_group_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], input_tex, output_tex, *learning_rate[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_z_group_count, texture_offset, entry_count, different_input, packed_config_count);
#define launch_update_weights_exact_kernel(window_width, single_output_z_group_const) \
switch (window_width) \
{ \
case 1: \
launch_update_weights_exact_kernel_const(1, single_output_z_group_const); \
break; \
case 2: \
launch_update_weights_exact_kernel_const(2, single_output_z_group_const); \
break; \
case 3: \
launch_update_weights_exact_kernel_const(3, single_output_z_group_const); \
break; \
case 4: \
launch_update_weights_exact_kernel_const(4, single_output_z_group_const); \
break; \
case 5: \
launch_update_weights_exact_kernel_const(5, single_output_z_group_const); \
break; \
case 6: \
launch_update_weights_exact_kernel_const(6, single_output_z_group_const); \
break; \
case 7: \
launch_update_weights_exact_kernel_const(7, single_output_z_group_const); \
break; \
case 8: \
launch_update_weights_exact_kernel_const(8, single_output_z_group_const); \
break; \
case 9: \
launch_update_weights_exact_kernel_const(9, single_output_z_group_const); \
break; \
case 10: \
launch_update_weights_exact_kernel_const(10, single_output_z_group_const); \
break; \
};
#define launch_update_weights_kernel_const(single_output_z_group_const) \
hipLaunchKernelGGL(( convolution_3d_update_weights_upd_kernel_kepler<single_output_z_group_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], input_tex, output_tex, *learning_rate[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_z_group_count, texture_offset, entry_count, different_input, packed_config_count);
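	// Forward pass: lazily creates textures for the input neurons and weights,
	// zeroes the output buffer when more than one input-feature-map group
	// accumulates into it, and dispatches the exact- or generic-width forward
	// kernel over the packed-config work list.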
void convolution_3d_layer_updater_cuda_kepler::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
if (dynamic_memobjects[0] == 0)
dynamic_memobjects[0] = cuda_texture_smart_ptr(new cuda_texture(input_neurons_buffer));
cuda_texture& input_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[0].get()));
int texture_offset = offset_input_entry_id * input_elem_count_per_entry;
if (dynamic_memobjects[1] == 0)
dynamic_memobjects[1] = cuda_texture_smart_ptr(new cuda_texture(data[0]));
cuda_texture& weights_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[1].get()));
if (forward_input_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*output_neurons_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
int packed_config_count = forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count * forward_input_feature_map_group_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[0]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (forward_input_feature_map_group_count == 1)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, true);
}
else
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, false);
}
}
else
{
if (forward_input_feature_map_group_count == 1)
{
launch_kernel(forward_x_block_size, true);
}
else
{
launch_kernel(forward_x_block_size, false);
}
}
}
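	// Backward pass: propagates output errors to input errors. Requires a distinct
	// input buffer and backprop_required; input errors are zeroed and accumulated
	// with atomicAdd when the output feature maps are processed in several groups.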
void convolution_3d_layer_updater_cuda_kepler::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
if (!different_input)
throw neural_network_exception("convolution_2d_layer_updater_cuda_kepler is not able to backprop to the same input");
if (!backprop_required)
throw neural_network_exception("convolution_2d_layer_updater_cuda_kepler is not configured to do backprop but requested to");
if (dynamic_memobjects[2] == 0)
dynamic_memobjects[2] = cuda_texture_smart_ptr(new cuda_texture(output_errors_buffer));
cuda_texture& output_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[2].get()));
if (dynamic_memobjects[1] == 0)
dynamic_memobjects[1] = cuda_texture_smart_ptr(new cuda_texture(data[0]));
cuda_texture& weights_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[1].get()));
if (backward_output_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*input_errors_buffer,
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
int packed_config_count = backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count * backward_output_feature_map_group_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, true);
}
else
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, false);
}
}
else
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_kernel(backward_x_block_size, true);
}
else
{
launch_backprop_kernel(backward_x_block_size, false);
}
}
}
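	// Weight update: first reduces output errors per feature map to update the
	// biases, then binds output errors and input neurons to textures and launches
	// the exact- or generic-width weight-update kernel, depending on whether the
	// window x dimension fits into a single block.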
void convolution_3d_layer_updater_cuda_kepler::enqueue_update_weights(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& learning_rate,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Update biases
{
int threadblock_size = get_threadblock_size_biases(output_elem_count_per_feature_map);
dim3 grid_size(1, output_configuration_specific.feature_map_count, entry_count);
dim3 block_size(threadblock_size, 1, 1);
int min_iteration_count = output_elem_count_per_feature_map / threadblock_size;
hipLaunchKernelGGL(( convolution_3d_update_biases_upd_kernel_kepler), dim3(grid_size), dim3(block_size), 0, stream_id,
*data[1],
*output_errors_buffer,
*learning_rate[1],
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
min_iteration_count);
}
if (dynamic_memobjects[2] == 0)
dynamic_memobjects[2] = cuda_texture_smart_ptr(new cuda_texture(output_errors_buffer));
cuda_texture& output_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[2].get()));
if (dynamic_memobjects[0] == 0)
dynamic_memobjects[0] = cuda_texture_smart_ptr(new cuda_texture(input_neurons_buffer));
cuda_texture& input_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[0].get()));
int texture_offset = offset_input_entry_id * input_elem_count_per_entry;
int packed_config_count = window_sizes[1] * window_sizes[2] * updater_output_z_group_count * updater_output_feature_map_block_count * input_configuration_specific.feature_map_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[1]);
// Update weights
{
if (updater_window_x_block_count == 1)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (updater_output_z_group_count == 1)
{
launch_update_weights_exact_kernel(window_sizes[0], true);
}
else
{
launch_update_weights_exact_kernel(window_sizes[0], false);
}
}
else
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
updater_window_x_block_count,
packed_config_count,
entry_count);
if (updater_output_z_group_count == 1)
{
launch_update_weights_kernel_const(true);
}
else
{
launch_update_weights_kernel_const(false);
}
}
}
}
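	// Splits a dimension of the given width into roughly equal blocks of at most
	// MAX_BLOCK_SIZE elements and returns the resulting block size.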
int convolution_3d_layer_updater_cuda_kepler::get_block_size(int width)
{
int block_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (width + block_count - 1) / block_count;
return block_size;
}
void convolution_3d_layer_updater_cuda_kepler::updater_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
{
std::tr1::array<int, 3> size_list;
size_list[0] = window_sizes[1];
size_list[1] = window_sizes[2];
size_list[2] = input_configuration_specific.feature_map_count;
space_filling_curve<3>::fill_pattern(size_list, updater_config_ordered_list1);
}
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
bool convolution_3d_layer_updater_cuda_kepler::is_in_place_backprop() const
{
return false;
}
std::vector<unsigned int> convolution_3d_layer_updater_cuda_kepler::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
res.push_back(output_elem_count_per_entry);
return res;
}
int convolution_3d_layer_updater_cuda_kepler::get_threadblock_size_biases(int output_neuron_count)
{
int threadblock_size;
if (output_neuron_count < 128)
{
threadblock_size = (output_neuron_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (output_neuron_count + 128 - 1) / 128;
threadblock_size = (output_neuron_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
std::vector<size_t> convolution_3d_layer_updater_cuda_kepler::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(packed_config<5>) * forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * forward_output_feature_map_block_count);
res.push_back(sizeof(packed_config<5>) * window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
res.push_back(sizeof(packed_config<5>) * backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * output_configuration_specific.feature_map_count * backward_input_feature_map_block_count);
return res;
}
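	// Builds the packed_config<5> task lists consumed by the forward, weight-update
	// and (optionally) backprop kernels on the host and copies them into the
	// corresponding additional device buffers.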
void convolution_3d_layer_updater_cuda_kepler::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(int input_feature_map_group_id = 0; input_feature_map_group_id < forward_input_feature_map_group_count; ++input_feature_map_group_id)
{
new_elem.set_val(4, input_feature_map_group_id * forward_input_feature_map_group_size);
for(int output_feature_map_block_id = 0; output_feature_map_block_id < forward_output_feature_map_block_count; ++output_feature_map_block_id)
{
new_elem.set_val(3, output_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < output_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(2, z);
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(1, y);
for(int x = 0; x < forward_x_block_count; ++x)
{
new_elem.set_val(0, x * forward_x_block_size);
task_list.push_back(new_elem);
}
}
}
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[0], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), hipMemcpyHostToDevice));
}
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(std::vector<std::tr1::array<int, 2> >::const_iterator it2 = updater_config_ordered_list2.begin(); it2 != updater_config_ordered_list2.end(); ++it2)
{
new_elem.set_val(3, it2->at(0) * FEATURE_MAP_BLOCK_SIZE);
new_elem.set_val(4, it2->at(1));
for(std::vector<std::tr1::array<int, 3> >::const_iterator it1 = updater_config_ordered_list1.begin(); it1 != updater_config_ordered_list1.end(); ++it1)
{
new_elem.set_val(0, it1->at(0));
new_elem.set_val(1, it1->at(1));
new_elem.set_val(2, it1->at(2));
task_list.push_back(new_elem);
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), hipMemcpyHostToDevice));
}
if (backprop_required)
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(int output_feature_map_group_id = 0; output_feature_map_group_id < backward_output_feature_map_group_count; ++output_feature_map_group_id)
{
new_elem.set_val(4, output_feature_map_group_id * backward_output_feature_map_group_size);
for(int input_feature_map_block_id = 0; input_feature_map_block_id < backward_input_feature_map_block_count; ++input_feature_map_block_id)
{
new_elem.set_val(3, input_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < input_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(2, z);
for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(1, y);
for(int x = 0; x < backward_x_block_count; ++x)
{
new_elem.set_val(0, x * backward_x_block_size + (backward_x_block_size - 1));
task_list.push_back(new_elem);
}
}
}
}
}
cuda_safe_call(hipMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), hipMemcpyHostToDevice));
}
}
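	// Chooses how many groups the input feature maps (forward), output z planes
	// (weight update) and output feature maps (backprop) are split into for the
	// given maximum entry count, and rebuilds the space-filling-curve ordering
	// used by the weight-update task list.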
void convolution_3d_layer_updater_cuda_kepler::set_max_entry_count(unsigned int max_entry_count)
{
forward_input_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count * max_entry_count,
input_configuration_specific.feature_map_count);
forward_input_feature_map_group_size = (input_configuration_specific.feature_map_count + forward_input_feature_map_group_count - 1) / forward_input_feature_map_group_count;
updater_output_z_group_count = cuda_util::get_group_count(
*cuda_config,
updater_output_feature_map_block_count * input_configuration_specific.feature_map_count * max_entry_count * updater_window_x_block_count * window_sizes[1] * window_sizes[2],
output_configuration_specific.dimension_sizes[2]);
updater_output_z_group_size = (output_configuration_specific.dimension_sizes[2] + updater_output_z_group_count - 1) / updater_output_z_group_count;
{
std::tr1::array<int, 2> size_list;
size_list[0] = updater_output_feature_map_block_count;
size_list[1] = updater_output_z_group_count;
space_filling_curve<2>::fill_pattern(size_list, updater_config_ordered_list2);
}
if (backprop_required)
{
backward_output_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count * max_entry_count,
output_configuration_specific.feature_map_count);
backward_output_feature_map_group_size = (output_configuration_specific.feature_map_count + backward_output_feature_map_group_count - 1) / backward_output_feature_map_group_count;
}
}
int convolution_3d_layer_updater_cuda_kepler::get_dynamic_memobject_count() const
{
return 3;
}
}
}
|
10c5bb076a7d2a82a3531c7911b1c79c884d1116.cu
|
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_3d_layer_updater_cuda_kepler.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_texture.h"
#include "packed_config.h"
#include "space_filling_curve.h"
#include "../convolution_layer.h"
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
namespace nnforge
{
namespace cuda
{
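		// Descriptive comment (added): forward kernel for arbitrary window widths.
		// Each thread computes BLOCK_SIZE consecutive output x positions for
		// FEATURE_MAP_BLOCK_SIZE output feature maps, reading inputs and weights
		// through texture objects. Biases are added only by the group starting at
		// input feature map 0; other groups accumulate partial sums with atomicAdd.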
template<int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_3d_tex_upd_kernel_kepler(
float * __restrict output,
cudaTextureObject_t input_tex,
cudaTextureObject_t weights_tex,
const float * __restrict biases,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int base_input_feature_map_id = conf.get_val(4);
int weight_count_per_output_feature_map = window_depth * window_height * window_width * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x + texture_offset;
int weights_offset = ((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_depth * window_height * window_width;
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - window_width;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
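		// Forward kernel specialized for a compile-time window width so the innermost
		// x loop is fully unrolled; otherwise identical to the generic version above.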
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_3d_tex_exact_upd_kernel_kepler(
float * __restrict output,
cudaTextureObject_t input_tex,
cudaTextureObject_t weights_tex,
const float * __restrict biases,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int base_input_feature_map_id = conf.get_val(4);
int weight_count_per_output_feature_map = window_depth * window_height * WINDOW_WIDTH * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x + texture_offset;
int weights_offset = ((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_depth * window_height * WINDOW_WIDTH;
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
} // for input_x
input_elem_id += input_width - WINDOW_WIDTH;
} // for input_y
input_elem_id += input_width * (input_height - window_height);
} // for input_z
input_elem_id += input_height * input_width * (input_depth - window_depth);
}
float * base_output = output + (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_depth * output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
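		// Bias update: one block per (entry, output feature map) pair; each thread
		// strided-sums the output errors, each warp reduces its partial sums with
		// __shfl_down, and lane 0 atomically adds the result, scaled by the per-bias
		// learning rate, to the bias.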
__global__ void convolution_3d_update_biases_upd_kernel_kepler(
float * __restrict biases,
const float * __restrict output_errors,
const float * __restrict learning_rate,
int output_feature_map_count,
int output_elem_count_per_feature_map,
int min_iteration_count)
{
int thread_id = threadIdx.x;
int output_feature_map_id = blockIdx.y;
int entry_id = blockIdx.z;
int threadblock_size = blockDim.x;
float sum = 0.0F;
const float * current_error = output_errors + (entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map;
int current_output_neuron_id = thread_id;
for(int i = 0; i < min_iteration_count; ++i)
{
sum += current_error[current_output_neuron_id];
current_output_neuron_id += threadblock_size;
}
if (current_output_neuron_id < output_elem_count_per_feature_map)
sum += current_error[current_output_neuron_id];
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
sum += __shfl_down(sum, tx);
}
if (lane_id == 0)
{
int offset = entry_id * output_feature_map_count + output_feature_map_id;
float current_learning_rate_val = learning_rate[offset];
atomicAdd(biases + offset, sum * current_learning_rate_val);
}
}
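		// Backprop kernel for arbitrary window widths: each thread accumulates input
		// errors for BLOCK_SIZE x positions of FEATURE_MAP_BLOCK_SIZE input feature
		// maps, walking the window row in WINDOW_WIDTH_LOCAL-wide chunks plus a scalar
		// tail; results are written directly for a single output-feature-map group
		// and accumulated with atomicAdd otherwise.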
template<int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_3d_deriviative_tex_upd_kernel_kepler(
float * __restrict input_errors,
cudaTextureObject_t output_tex,
cudaTextureObject_t weights_tex,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int input_feature_map_id = conf.get_val(3);
int base_output_feature_map_id = conf.get_val(4);
int weight_count_per_input_feature_map = window_depth * window_height * window_width;
int output_elem_id = (((entry_id * output_feature_map_count + base_output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int weights_offset = ((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * weight_count_per_input_feature_map;
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
									bool b_fit_x = b_fit_y && (i > min_x_exclusive) && (i <= max_x_inclusive);
if (b_fit_x)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
weights_offset++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit_x = b_fit_y && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit_x)
{
float inp = tex1Dfetch<float>(output_tex, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
}
}
weights_offset++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
weights_offset += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
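		// Backprop kernel specialized for a compile-time window width: the validity of
		// output x positions is precomputed as a bit mask so the fully unrolled inner
		// loops avoid per-element boundary checks.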
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_3d_deriviative_tex_exact_upd_kernel_kepler(
float * __restrict input_errors,
cudaTextureObject_t output_tex,
cudaTextureObject_t weights_tex,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (entry_id < entry_count) && (packed_config_id < packed_config_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int x = conf.get_val(0);
int y = conf.get_val(1);
int z = conf.get_val(2);
int input_feature_map_id = conf.get_val(3);
int base_output_feature_map_id = conf.get_val(4);
int weight_count_per_input_feature_map = window_depth * window_height * WINDOW_WIDTH;
int output_elem_id = (((entry_id * output_feature_map_count + base_output_feature_map_id) * output_depth + z) * output_height + y) * output_width + x;
int weights_offset = ((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * weight_count_per_input_feature_map;
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_z_exclusive = z - output_depth;
int max_z_inclusive = z;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
unsigned int mask = 0;
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int i = 0; i < iteration_count; ++i)
{
for(int input_z = 0; input_z < window_depth; ++input_z)
{
bool b_fit_z = (input_z > min_z_exclusive) && (input_z <= max_z_inclusive);
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit_y = b_fit_z && (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit_x = b_fit_y && (((1 << i) & mask) != 0);
if (b_fit_x)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
weights_offset++;
}
output_elem_id -= output_width;
} // for input_y
output_elem_id += output_width * (window_height - output_height);
} // for input_z
output_elem_id += output_width * output_height * (output_depth + window_depth);
weights_offset += weight_count_per_input_feature_map * (input_feature_map_count - 1);
}
float * base_input = input_errors + (((entry_id * input_feature_map_count + input_feature_map_id) * input_depth + z) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_depth * input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
template<bool single_output_z_group>
__global__ void convolution_3d_update_weights_upd_kernel_kepler(
float * __restrict weights,
cudaTextureObject_t input_tex,
cudaTextureObject_t output_tex,
const float * __restrict learning_rate,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_width,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_z_group_count,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int weight_x = (blockIdx.x * blockDim.x + threadIdx.x) * WINDOW_WIDTH_LOCAL;
int packed_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (packed_config_id < packed_config_count) && (entry_id < entry_count) && (weight_x < window_width);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int output_z_start_id = conf.get_val(4);
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
int output_elem_id = (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z_start_id) * output_height) * output_width;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_depth + weight_z + output_z_start_id) * input_height + weight_y) * input_width + texture_offset + weight_x;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
for(int output_z = output_z_start_id; output_z < output_depth; output_z += output_z_group_count)
{
for(int output_y = 0; output_y < output_height; output_y++)
{
float input_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_buf[i] = tex1Dfetch<float>(input_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch<float>(output_tex, output_elem_id + output_neuron_count_per_feature_map * i);
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch<float>(input_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_buf[j];
output_elem_id++;
input_elem_id++;
}
input_elem_id += window_width - WINDOW_WIDTH_LOCAL;
}
output_elem_id += output_height * output_width * (output_z_group_count - 1);
input_elem_id += input_height * input_width * (output_z_group_count - 1) + (input_width * (window_height - 1));
}
int offset = ((((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * window_width;
float * cur_weights = weights + offset;
const float * cur_learning_rate = learning_rate + offset;
if (single_output_z_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH_LOCAL + j] * cur_learning_rate[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j] * cur_learning_rate[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
template<int WINDOW_WIDTH, bool single_output_z_group>
__global__ void convolution_3d_update_weights_exact_upd_kernel_kepler(
float * __restrict weights,
cudaTextureObject_t input_tex,
cudaTextureObject_t output_tex,
const float * __restrict learning_rate,
const packed_config<5> * __restrict packed_config_list,
int output_width,
int output_height,
int output_depth,
int input_width,
int input_height,
int input_depth,
int window_height,
int window_depth,
int input_feature_map_count,
int output_feature_map_count,
int output_z_group_count,
int texture_offset,
int entry_count,
bool different_input,
int packed_config_count)
{
int packed_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = blockIdx.y * blockDim.y + threadIdx.y;
bool in_bounds = (packed_config_id < packed_config_count) && (entry_id < entry_count);
if (in_bounds)
{
packed_config<5> conf = packed_config_list[packed_config_id];
int weight_y = conf.get_val(0);
int weight_z = conf.get_val(1);
int input_feature_map_id = conf.get_val(2);
int output_feature_map_id = conf.get_val(3);
int output_z_start_id = conf.get_val(4);
int output_neuron_count_per_feature_map = output_depth * output_width * output_height;
int output_elem_id = (((entry_id * output_feature_map_count + output_feature_map_id) * output_depth + output_z_start_id) * output_height) * output_width;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_depth + weight_z + output_z_start_id) * input_height + weight_y) * input_width + texture_offset;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
for(int output_z = output_z_start_id; output_z < output_depth; output_z += output_z_group_count)
{
for(int output_y = 0; output_y < output_height; output_y++)
{
float input_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_buf[i] = tex1Dfetch<float>(input_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch<float>(output_tex, output_elem_id + output_neuron_count_per_feature_map * i);
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH - 1] = tex1Dfetch<float>(input_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_buf[j];
output_elem_id++;
input_elem_id++;
}
}
output_elem_id += output_height * output_width * (output_z_group_count - 1);
input_elem_id += input_height * input_width * (output_z_group_count - 1) + (input_width * (window_height - 1));
}
int offset = ((((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_depth + weight_z) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_depth * window_height * WINDOW_WIDTH;
float * cur_weights = weights + offset;
const float * cur_learning_rate = learning_rate + offset;
if (single_output_z_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH + j] * cur_learning_rate[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j] * cur_learning_rate[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
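/*
 * Note (illustrative sketch, not part of the updater above): the input_buf shift in the
 * weight-update kernels implements a register-resident sliding window, so each texture
 * value is fetched once and reused for every horizontal weight offset. A minimal
 * standalone version of the pattern follows; the plain-array input, the single thread
 * per output row, and the template width are assumptions.
 */
template<int W>
__global__ void sliding_window_dot(const float * __restrict in, const float * __restrict coeff, float * __restrict out, int n)
{
	// One thread produces n outputs from an input row of n + W - 1 elements.
	float buf[W];
	#pragma unroll
	for(int i = 0; i < W - 1; ++i)
		buf[i + 1] = in[i]; // preload the first W - 1 values
	for(int x = 0; x < n; ++x)
	{
		#pragma unroll
		for(int i = 0; i < W - 1; ++i)
			buf[i] = buf[i + 1]; // shift the window left by one
		buf[W - 1] = in[x + W - 1]; // bring in the newest value
		float sum = 0.0F;
		#pragma unroll
		for(int i = 0; i < W; ++i)
			sum += coeff[i] * buf[i];
		out[x] = sum;
	}
}
// Example launch (assumed): sliding_window_dot<4><<<1, 1>>>(d_in, d_coeff, d_out, n);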
convolution_3d_layer_updater_cuda_kepler::convolution_3d_layer_updater_cuda_kepler()
{
}
convolution_3d_layer_updater_cuda_kepler::~convolution_3d_layer_updater_cuda_kepler()
{
}
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
#define launch_exact_kernel_const_const(window_width_const, block_size_const, single_input_feature_map_group) \
convolution_3d_tex_exact_upd_kernel_kepler<window_width_const,block_size_const,single_input_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, input_tex, weights_tex, *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, packed_config_count);
#define launch_exact_kernel_const(window_width, block_size_const, single_input_feature_map_group) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, block_size_const, single_input_feature_map_group); \
break; \
case 2: \
launch_exact_kernel_const_const(2, block_size_const, single_input_feature_map_group); \
break; \
case 3: \
launch_exact_kernel_const_const(3, block_size_const, single_input_feature_map_group); \
break; \
case 4: \
launch_exact_kernel_const_const(4, block_size_const, single_input_feature_map_group); \
break; \
case 5: \
launch_exact_kernel_const_const(5, block_size_const, single_input_feature_map_group); \
break; \
case 6: \
launch_exact_kernel_const_const(6, block_size_const, single_input_feature_map_group); \
break; \
case 7: \
launch_exact_kernel_const_const(7, block_size_const, single_input_feature_map_group); \
break; \
case 8: \
launch_exact_kernel_const_const(8, block_size_const, single_input_feature_map_group); \
break; \
case 9: \
launch_exact_kernel_const_const(9, block_size_const, single_input_feature_map_group); \
break; \
case 10: \
launch_exact_kernel_const_const(10, block_size_const, single_input_feature_map_group); \
break; \
};
#define launch_exact_kernel(window_width, block_size, single_input_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1, single_input_feature_map_group); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2, single_input_feature_map_group); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3, single_input_feature_map_group); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4, single_input_feature_map_group); \
break; \
case 5: \
launch_exact_kernel_const(window_width, 5, single_input_feature_map_group); \
break; \
};
#define launch_kernel_const(block_size_const, single_input_feature_map_group) \
convolution_3d_tex_upd_kernel_kepler<block_size_const,single_input_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, input_tex, weights_tex, *data[1], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, packed_config_count);
#define launch_kernel(block_size, single_input_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_kernel_const(1, single_input_feature_map_group); \
break; \
case 2: \
launch_kernel_const(2, single_input_feature_map_group); \
break; \
case 3: \
launch_kernel_const(3, single_input_feature_map_group); \
break; \
case 4: \
launch_kernel_const(4, single_input_feature_map_group); \
break; \
case 5: \
launch_kernel_const(5, single_input_feature_map_group); \
break; \
};
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const, single_output_feature_map_group) \
convolution_3d_deriviative_tex_exact_upd_kernel_kepler<window_width_const,block_size_const,single_output_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, output_tex, weights_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, packed_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const, single_output_feature_map_group) \
switch (window_width) \
{ \
case 1: \
launch_backprop_exact_kernel_const_const(1, block_size_const, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_exact_kernel_const_const(2, block_size_const, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_exact_kernel_const_const(3, block_size_const, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_exact_kernel_const_const(4, block_size_const, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_exact_kernel_const_const(5, block_size_const, single_output_feature_map_group); \
break; \
case 6: \
launch_backprop_exact_kernel_const_const(6, block_size_const, single_output_feature_map_group); \
break; \
case 7: \
launch_backprop_exact_kernel_const_const(7, block_size_const, single_output_feature_map_group); \
break; \
case 8: \
launch_backprop_exact_kernel_const_const(8, block_size_const, single_output_feature_map_group); \
break; \
case 9: \
launch_backprop_exact_kernel_const_const(9, block_size_const, single_output_feature_map_group); \
break; \
case 10: \
launch_backprop_exact_kernel_const_const(10, block_size_const, single_output_feature_map_group); \
break; \
};
#define launch_backprop_exact_kernel(window_width, block_size, single_output_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_backprop_exact_kernel_const(window_width, 1, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_exact_kernel_const(window_width, 2, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_exact_kernel_const(window_width, 3, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_exact_kernel_const(window_width, 4, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_exact_kernel_const(window_width, 5, single_output_feature_map_group); \
break; \
};
#define launch_backprop_kernel_const(block_size_const, single_output_feature_map_group) \
convolution_3d_deriviative_tex_upd_kernel_kepler<block_size_const,single_output_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, output_tex, weights_tex, packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, packed_config_count);
#define launch_backprop_kernel(block_size, single_output_feature_map_group) \
switch (block_size) \
{ \
case 1: \
launch_backprop_kernel_const(1, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_kernel_const(2, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_kernel_const(3, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_kernel_const(4, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_kernel_const(5, single_output_feature_map_group); \
break; \
};
#define launch_update_weights_exact_kernel_const(window_width_const, single_output_z_group_const) \
convolution_3d_update_weights_exact_upd_kernel_kepler<window_width_const, single_output_z_group_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*data[0], input_tex, output_tex, *learning_rate[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_z_group_count, texture_offset, entry_count, different_input, packed_config_count);
#define launch_update_weights_exact_kernel(window_width, single_output_z_group_const) \
switch (window_width) \
{ \
case 1: \
launch_update_weights_exact_kernel_const(1, single_output_z_group_const); \
break; \
case 2: \
launch_update_weights_exact_kernel_const(2, single_output_z_group_const); \
break; \
case 3: \
launch_update_weights_exact_kernel_const(3, single_output_z_group_const); \
break; \
case 4: \
launch_update_weights_exact_kernel_const(4, single_output_z_group_const); \
break; \
case 5: \
launch_update_weights_exact_kernel_const(5, single_output_z_group_const); \
break; \
case 6: \
launch_update_weights_exact_kernel_const(6, single_output_z_group_const); \
break; \
case 7: \
launch_update_weights_exact_kernel_const(7, single_output_z_group_const); \
break; \
case 8: \
launch_update_weights_exact_kernel_const(8, single_output_z_group_const); \
break; \
case 9: \
launch_update_weights_exact_kernel_const(9, single_output_z_group_const); \
break; \
case 10: \
launch_update_weights_exact_kernel_const(10, single_output_z_group_const); \
break; \
};
#define launch_update_weights_kernel_const(single_output_z_group_const) \
convolution_3d_update_weights_upd_kernel_kepler<single_output_z_group_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*data[0], input_tex, output_tex, *learning_rate[0], packed_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], output_configuration_specific.dimension_sizes[2], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[2], window_sizes[0], window_sizes[1], window_sizes[2], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_z_group_count, texture_offset, entry_count, different_input, packed_config_count);
void convolution_3d_layer_updater_cuda_kepler::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
if (dynamic_memobjects[0] == 0)
dynamic_memobjects[0] = cuda_texture_smart_ptr(new cuda_texture(input_neurons_buffer));
cuda_texture& input_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[0].get()));
int texture_offset = offset_input_entry_id * input_elem_count_per_entry;
if (dynamic_memobjects[1] == 0)
dynamic_memobjects[1] = cuda_texture_smart_ptr(new cuda_texture(data[0]));
cuda_texture& weights_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[1].get()));
if (forward_input_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*output_neurons_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
int packed_config_count = forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count * forward_input_feature_map_group_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[0]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (forward_input_feature_map_group_count == 1)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, true);
}
else
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, false);
}
}
else
{
if (forward_input_feature_map_group_count == 1)
{
launch_kernel(forward_x_block_size, true);
}
else
{
launch_kernel(forward_x_block_size, false);
}
}
}
void convolution_3d_layer_updater_cuda_kepler::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
if (!different_input)
throw neural_network_exception("convolution_2d_layer_updater_cuda_kepler is not able to backprop to the same input");
if (!backprop_required)
throw neural_network_exception("convolution_2d_layer_updater_cuda_kepler is not configured to do backprop but requested to");
if (dynamic_memobjects[2] == 0)
dynamic_memobjects[2] = cuda_texture_smart_ptr(new cuda_texture(output_errors_buffer));
cuda_texture& output_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[2].get()));
if (dynamic_memobjects[1] == 0)
dynamic_memobjects[1] = cuda_texture_smart_ptr(new cuda_texture(data[0]));
cuda_texture& weights_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[1].get()));
if (backward_output_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*input_errors_buffer,
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
int packed_config_count = backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count * backward_output_feature_map_group_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, true);
}
else
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, false);
}
}
else
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_kernel(backward_x_block_size, true);
}
else
{
launch_backprop_kernel(backward_x_block_size, false);
}
}
}
void convolution_3d_layer_updater_cuda_kepler::enqueue_update_weights(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& learning_rate,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Update biases
{
int threadblock_size = get_threadblock_size_biases(output_elem_count_per_feature_map);
dim3 grid_size(1, output_configuration_specific.feature_map_count, entry_count);
dim3 block_size(threadblock_size, 1, 1);
int min_iteration_count = output_elem_count_per_feature_map / threadblock_size;
convolution_3d_update_biases_upd_kernel_kepler<<<grid_size, block_size, 0, stream_id>>>(
*data[1],
*output_errors_buffer,
*learning_rate[1],
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
min_iteration_count);
}
if (dynamic_memobjects[2] == 0)
dynamic_memobjects[2] = cuda_texture_smart_ptr(new cuda_texture(output_errors_buffer));
cuda_texture& output_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[2].get()));
if (dynamic_memobjects[0] == 0)
dynamic_memobjects[0] = cuda_texture_smart_ptr(new cuda_texture(input_neurons_buffer));
cuda_texture& input_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[0].get()));
int texture_offset = offset_input_entry_id * input_elem_count_per_entry;
int packed_config_count = window_sizes[1] * window_sizes[2] * updater_output_z_group_count * updater_output_feature_map_block_count * input_configuration_specific.feature_map_count;
const packed_config<5> * packed_config_list = static_cast<const packed_config<5> *>((const void *)*additional_buffers[1]);
// Update weights
{
if (updater_window_x_block_count == 1)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
packed_config_count,
entry_count,
1);
if (updater_output_z_group_count == 1)
{
launch_update_weights_exact_kernel(window_sizes[0], true);
}
else
{
launch_update_weights_exact_kernel(window_sizes[0], false);
}
}
else
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
updater_window_x_block_count,
packed_config_count,
entry_count);
if (updater_output_z_group_count == 1)
{
launch_update_weights_kernel_const(true);
}
else
{
launch_update_weights_kernel_const(false);
}
}
}
}
int convolution_3d_layer_updater_cuda_kepler::get_block_size(int width)
{
int block_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
int block_size = (width + block_count - 1) / block_count;
return block_size;
}
void convolution_3d_layer_updater_cuda_kepler::updater_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
{
std::tr1::array<int, 3> size_list;
size_list[0] = window_sizes[1];
size_list[1] = window_sizes[2];
size_list[2] = input_configuration_specific.feature_map_count;
space_filling_curve<3>::fill_pattern(size_list, updater_config_ordered_list1);
}
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
bool convolution_3d_layer_updater_cuda_kepler::is_in_place_backprop() const
{
return false;
}
std::vector<unsigned int> convolution_3d_layer_updater_cuda_kepler::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
res.push_back(output_elem_count_per_entry);
return res;
}
int convolution_3d_layer_updater_cuda_kepler::get_threadblock_size_biases(int output_neuron_count)
{
int threadblock_size;
if (output_neuron_count < 128)
{
threadblock_size = (output_neuron_count + 32 - 1) / 32 * 32;
}
else
{
int threadblock_count = (output_neuron_count + 128 - 1) / 128;
threadblock_size = (output_neuron_count + threadblock_count - 1) / threadblock_count;
threadblock_size = (threadblock_size + 32 - 1) / 32 * 32;
}
return threadblock_size;
}
std::vector<size_t> convolution_3d_layer_updater_cuda_kepler::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(packed_config<5>) * forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * input_configuration_specific.feature_map_count * forward_output_feature_map_block_count);
res.push_back(sizeof(packed_config<5>) * window_sizes[1] * window_sizes[2] * output_configuration_specific.dimension_sizes[1] * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
res.push_back(sizeof(packed_config<5>) * backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * output_configuration_specific.feature_map_count * backward_input_feature_map_block_count);
return res;
}
void convolution_3d_layer_updater_cuda_kepler::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(int input_feature_map_group_id = 0; input_feature_map_group_id < forward_input_feature_map_group_count; ++input_feature_map_group_id)
{
new_elem.set_val(4, input_feature_map_group_id * forward_input_feature_map_group_size);
for(int output_feature_map_block_id = 0; output_feature_map_block_id < forward_output_feature_map_block_count; ++output_feature_map_block_id)
{
new_elem.set_val(3, output_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < output_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(2, z);
for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(1, y);
for(int x = 0; x < forward_x_block_count; ++x)
{
new_elem.set_val(0, x * forward_x_block_size);
task_list.push_back(new_elem);
}
}
}
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[0], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), cudaMemcpyHostToDevice));
}
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(std::vector<std::tr1::array<int, 2> >::const_iterator it2 = updater_config_ordered_list2.begin(); it2 != updater_config_ordered_list2.end(); ++it2)
{
new_elem.set_val(3, it2->at(0) * FEATURE_MAP_BLOCK_SIZE);
new_elem.set_val(4, it2->at(1));
for(std::vector<std::tr1::array<int, 3> >::const_iterator it1 = updater_config_ordered_list1.begin(); it1 != updater_config_ordered_list1.end(); ++it1)
{
new_elem.set_val(0, it1->at(0));
new_elem.set_val(1, it1->at(1));
new_elem.set_val(2, it1->at(2));
task_list.push_back(new_elem);
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), cudaMemcpyHostToDevice));
}
if (backprop_required)
{
std::vector<packed_config<5> > task_list;
packed_config<5> new_elem;
for(int output_feature_map_group_id = 0; output_feature_map_group_id < backward_output_feature_map_group_count; ++output_feature_map_group_id)
{
new_elem.set_val(4, output_feature_map_group_id * backward_output_feature_map_group_size);
for(int input_feature_map_block_id = 0; input_feature_map_block_id < backward_input_feature_map_block_count; ++input_feature_map_block_id)
{
new_elem.set_val(3, input_feature_map_block_id * FEATURE_MAP_BLOCK_SIZE);
for(int z = 0; z < input_configuration_specific.dimension_sizes[2]; ++z)
{
new_elem.set_val(2, z);
for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
{
new_elem.set_val(1, y);
for(int x = 0; x < backward_x_block_count; ++x)
{
new_elem.set_val(0, x * backward_x_block_size + (backward_x_block_size - 1));
task_list.push_back(new_elem);
}
}
}
}
}
cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(packed_config<5>) * task_list.size(), cudaMemcpyHostToDevice));
}
}
void convolution_3d_layer_updater_cuda_kepler::set_max_entry_count(unsigned int max_entry_count)
{
forward_input_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
forward_x_block_count * output_configuration_specific.dimension_sizes[1] * output_configuration_specific.dimension_sizes[2] * forward_output_feature_map_block_count * max_entry_count,
input_configuration_specific.feature_map_count);
forward_input_feature_map_group_size = (input_configuration_specific.feature_map_count + forward_input_feature_map_group_count - 1) / forward_input_feature_map_group_count;
updater_output_z_group_count = cuda_util::get_group_count(
*cuda_config,
updater_output_feature_map_block_count * input_configuration_specific.feature_map_count * max_entry_count * updater_window_x_block_count * window_sizes[1] * window_sizes[2],
output_configuration_specific.dimension_sizes[2]);
updater_output_z_group_size = (output_configuration_specific.dimension_sizes[2] + updater_output_z_group_count - 1) / updater_output_z_group_count;
{
std::tr1::array<int, 2> size_list;
size_list[0] = updater_output_feature_map_block_count;
size_list[1] = updater_output_z_group_count;
space_filling_curve<2>::fill_pattern(size_list, updater_config_ordered_list2);
}
if (backprop_required)
{
backward_output_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
backward_x_block_count * input_configuration_specific.dimension_sizes[1] * input_configuration_specific.dimension_sizes[2] * backward_input_feature_map_block_count * max_entry_count,
output_configuration_specific.feature_map_count);
backward_output_feature_map_group_size = (output_configuration_specific.feature_map_count + backward_output_feature_map_group_count - 1) / backward_output_feature_map_group_count;
}
}
int convolution_3d_layer_updater_cuda_kepler::get_dynamic_memobject_count() const
{
return 3;
}
}
}
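/*
 * Illustrative note (not part of the class above): the launch_* macros turn a runtime
 * block size into a compile-time template argument by enumerating the supported values
 * in nested switch statements, so the compiler can fully unroll the per-block loops.
 * A minimal standalone sketch of that dispatch pattern follows; the kernel, its
 * arguments and the supported range 1..5 are assumptions used only for illustration.
 */
template<int BLOCK_SIZE>
__global__ void scale_blocked_kernel(float * data, float factor, int n)
{
	int base = (blockIdx.x * blockDim.x + threadIdx.x) * BLOCK_SIZE;
	#pragma unroll
	for(int j = 0; j < BLOCK_SIZE; ++j)
		if (base + j < n)
			data[base + j] *= factor;
}
inline void launch_scale_blocked(float * data, float factor, int n, int block_size, cudaStream_t stream)
{
	int threads_per_block = 256;
	int work_items = (n + block_size - 1) / block_size;
	int blocks = (work_items + threads_per_block - 1) / threads_per_block;
	switch (block_size)
	{
	case 1: scale_blocked_kernel<1><<<blocks, threads_per_block, 0, stream>>>(data, factor, n); break;
	case 2: scale_blocked_kernel<2><<<blocks, threads_per_block, 0, stream>>>(data, factor, n); break;
	case 3: scale_blocked_kernel<3><<<blocks, threads_per_block, 0, stream>>>(data, factor, n); break;
	case 4: scale_blocked_kernel<4><<<blocks, threads_per_block, 0, stream>>>(data, factor, n); break;
	case 5: scale_blocked_kernel<5><<<blocks, threads_per_block, 0, stream>>>(data, factor, n); break;
	}
}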
|
7f3fd01d182aee970064a63c76526674a38ee22f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
STEPS
1. Allocate host memory and initialize host data e.g. malloc
2. Allocate device memory e.g. hipMalloc
3. Transfer input data from host to device memory e.g. hipMemcpy
4. Execute kernels
5. Transfer output from device memory to host
6. Free Host & CUDA memory e.g. free & hipFree
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 10000000
#define MAX_ERR 1e-6
__global__ void vector_add(float *out, float *a, float *b, int n) {
int index = 0;
int stride = 1;
for(int i = index; i < n; i+=stride){
out[i] = a[i] + b[i];
}
}
int main(){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
// 1. Allocate memory
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
out = (float*)malloc(sizeof(float) * N);
// 1. Initialize array
for(int i = 0; i < N; i++){
a[i] = 1.0f; b[i] = 2.0f;
}
// 2. Allocate device memory
hipMalloc((void**)&d_a, sizeof(float) * N);
hipMalloc((void**)&d_b, sizeof(float) * N);
hipMalloc((void**)&d_out, sizeof(float) * N);
// 3. Transfer input data from host to device
hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice);
// 4. Kernel launch
hipLaunchKernelGGL(( vector_add), dim3(1),dim3(1), 0, 0, d_out, d_a, d_b, N); //use only one thread
// 5. Transfer output from device memory to host
hipMemcpy(out, d_out, sizeof(float) * N, hipMemcpyDeviceToHost);
// 6. Free cuda memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
}
|
7f3fd01d182aee970064a63c76526674a38ee22f.cu
|
/*
STEPS
1. Allocate host memory and initialize host data e.g. malloc
2. Allocate device memory e.g. cudaMalloc
3. Transfer input data from host to device memory e.g. cudaMemcpy
4. Execute kernels
5. Transfer output from device memory to host
6. Free Host & CUDA memory e.g. free & cudaFree
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10000000
#define MAX_ERR 1e-6
__global__ void vector_add(float *out, float *a, float *b, int n) {
int index = 0;
int stride = 1;
for(int i = index; i < n; i+=stride){
out[i] = a[i] + b[i];
}
}
int main(){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
// 1. Allocate memory
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
out = (float*)malloc(sizeof(float) * N);
// 1. Initialize array
for(int i = 0; i < N; i++){
a[i] = 1.0f; b[i] = 2.0f;
}
// 2. Allocate device memory
cudaMalloc((void**)&d_a, sizeof(float) * N);
cudaMalloc((void**)&d_b, sizeof(float) * N);
cudaMalloc((void**)&d_out, sizeof(float) * N);
// 3. Transfer input data from host to device
cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
// 4. Kernel launch
vector_add<<<1,1>>>(d_out, d_a, d_b, N); //use only one thread
// 5. Transfer output from device memory to host
cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
// 6. Free cuda memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
// Deallocate host memory
free(a);
free(b);
free(out);
}
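/*
 * Illustrative follow-up (not part of the original sample): MAX_ERR is defined above
 * but never used, and the kernel is launched with a single thread. A common next step
 * is a grid-stride variant of the same kernel plus a host-side check against MAX_ERR;
 * the block size of 256 in the usage comment is an assumption.
 */
__global__ void vector_add_grid_stride(float *out, float *a, float *b, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x; // each thread starts at its own element
    int stride = blockDim.x * gridDim.x;               // and advances by the whole grid width
    for(int i = index; i < n; i += stride){
        out[i] = a[i] + b[i];
    }
}
void verify(const float *out, int n){
    for(int i = 0; i < n; i++){
        assert(fabs(out[i] - 3.0f) < MAX_ERR); // a[i] = 1.0f and b[i] = 2.0f above
    }
    printf("verified %d elements\n", n);
}
// Usage (assumed): vector_add_grid_stride<<<(N + 255) / 256, 256>>>(d_out, d_a, d_b, N); then verify(out, N);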
|
b7afc87d48f5a9917921d067648f88099cc899c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matmul(double *a, double *b, double *c, int n)
{
// Get global thread ID
int Col = blockIdx.x*blockDim.x+threadIdx.x;
int Row = blockIdx.y*blockDim.y+threadIdx.y;
// Not out of bounds
if((Col<n) && (Row<n)) {// Multiply matrices
// c[Row*n + Col] = 0;
double sum = 0.0;
for(int k=0;k<n;k++) {
// c[Row*n + Col] += a[Row*n+k]*b[k*n+Col];
sum += a[Row*n+k]*b[k*n+Col];
}
c[Row*n + Col] = sum;
}
}
|
b7afc87d48f5a9917921d067648f88099cc899c5.cu
|
#include "includes.h"
__global__ void matmul(double *a, double *b, double *c, int n)
{
// Get global thread ID
int Col = blockIdx.x*blockDim.x+threadIdx.x;
int Row = blockIdx.y*blockDim.y+threadIdx.y;
// Not out of bounds
if((Col<n) && (Row<n)) {// Multiply matrices
// c[Row*n + Col] = 0;
double sum = 0.0;
for(int k=0;k<n;k++) {
// c[Row*n + Col] += a[Row*n+k]*b[k*n+Col];
sum += a[Row*n+k]*b[k*n+Col];
}
c[Row*n + Col] = sum;
}
}
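/*
 * Host-side usage sketch (illustrative; the file above only defines the kernel). It
 * assumes square row-major n x n matrices and a 16x16 thread block; those choices,
 * the function name and the explicit <cuda_runtime.h> include are not part of the
 * original file.
 */
#include <cuda_runtime.h>
void matmul_on_device(double *a, double *b, double *c, int n)
{
    size_t bytes = (size_t)n * n * sizeof(double);
    double *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
    dim3 block(16, 16); // Col along x, Row along y, matching the kernel's indexing
    dim3 grid((n + block.x - 1) / block.x, (n + block.y - 1) / block.y);
    matmul<<<grid, block>>>(d_a, d_b, d_c, n);
    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
}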
|
cf7697f2aaaa17ce730552f40b84177f23a236fc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <[email protected]>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*
* Modified by VinInn for testing math funcs
*/
/* to run test
foreach f ( $CMSSW_BASE/test/$SCRAM_ARCH/DFM_Vector* )
echo $f; $f
end
*/
#include <algorithm>
#include <cassert>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#ifdef __HIPCC__
#define inline __host__ __device__ inline
#include <vdt/sin.h>
#undef inline
#else
#include <vdt/sin.h>
#endif
#include "DataFormats/Math/interface/approx_log.h"
#include "DataFormats/Math/interface/approx_exp.h"
#include "DataFormats/Math/interface/approx_atan2.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
#include "HeterogeneousCore/CUDAUtilities/interface/launch.h"
std::mt19937 eng;
std::mt19937 eng2;
std::uniform_real_distribution<float> rgen(0., 1.);
constexpr float myExp(float x) { return unsafe_expf<6>(x); }
constexpr float myLog(float x) { return unsafe_logf<6>(x); }
__host__ __device__ inline float mySin(float x) { return vdt::fast_sinf(x); }
constexpr int USEEXP = 0, USESIN = 1, USELOG = 2;
template <int USE, bool ADDY = false>
// __host__ __device__
constexpr float testFunc(float x, float y) {
float ret = 0;
if (USE == USEEXP)
ret = myExp(x);
else if (USE == USESIN)
ret = mySin(x);
else
ret = myLog(x);
return ADDY ? ret + y : ret;
}
template <int USE, bool ADDY>
__global__ void vectorOp(const float *A, const float *B, float *C, int numElements) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements) {
C[i] = testFunc<USE, ADDY>(A[i], B[i]);
}
}
template <int USE, bool ADDY>
void vectorOpH(const float *A, const float *B, float *C, int numElements) {
for (int i = 0; i < numElements; ++i) {
C[i] = testFunc<USE, ADDY>(A[i], B[i]);
}
}
template <int USE, bool ADDY = false>
void go() {
auto start = std::chrono::high_resolution_clock::now();
auto delta = start - start;
int numElements = 200000;
size_t size = numElements * sizeof(float);
std::cout << "[Vector of " << numElements << " elements]\n";
auto h_A = std::make_unique<float[]>(numElements);
auto h_B = std::make_unique<float[]>(numElements);
auto h_C = std::make_unique<float[]>(numElements);
auto h_C2 = std::make_unique<float[]>(numElements);
std::generate(h_A.get(), h_A.get() + numElements, [&]() { return rgen(eng); });
std::generate(h_B.get(), h_B.get() + numElements, [&]() { return rgen(eng); });
delta -= (std::chrono::high_resolution_clock::now() - start);
auto d_A = cms::cuda::make_device_unique<float[]>(numElements, nullptr);
auto d_B = cms::cuda::make_device_unique<float[]>(numElements, nullptr);
auto d_C = cms::cuda::make_device_unique<float[]>(numElements, nullptr);
cudaCheck(hipMemcpy(d_A.get(), h_A.get(), size, hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(d_B.get(), h_B.get(), size, hipMemcpyHostToDevice));
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "cuda alloc+copy took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
// Launch the Vector OP CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads\n";
delta -= (std::chrono::high_resolution_clock::now() - start);
cms::cuda::launch(
vectorOp<USE, ADDY>, {blocksPerGrid, threadsPerBlock}, d_A.get(), d_B.get(), d_C.get(), numElements);
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
delta -= (std::chrono::high_resolution_clock::now() - start);
cms::cuda::launch(
vectorOp<USE, ADDY>, {blocksPerGrid, threadsPerBlock}, d_A.get(), d_B.get(), d_C.get(), numElements);
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
delta -= (std::chrono::high_resolution_clock::now() - start);
cudaCheck(hipMemcpy(h_C.get(), d_C.get(), size, hipMemcpyDeviceToHost));
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "cuda copy back took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
// on host now...
delta -= (std::chrono::high_resolution_clock::now() - start);
vectorOpH<USE, ADDY>(h_A.get(), h_B.get(), h_C2.get(), numElements);
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "host computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
delta -= (std::chrono::high_resolution_clock::now() - start);
vectorOpH<USE, ADDY>(h_A.get(), h_B.get(), h_C2.get(), numElements);
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "host computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
// Verify that the result vector is correct
double ave = 0;
int maxDiff = 0;
long long ndiff = 0;
double fave = 0;
float fmaxDiff = 0;
for (int i = 0; i < numElements; ++i) {
approx_math::binary32 g, c;
g.f = testFunc<USE, ADDY>(h_A[i], h_B[i]);
c.f = h_C[i];
auto diff = std::abs(g.i32 - c.i32);
maxDiff = ::max(diff, maxDiff);
ave += diff;
if (diff != 0)
++ndiff;
auto fdiff = std::abs(g.f - c.f);
fave += fdiff;
fmaxDiff = ::max(fdiff, fmaxDiff);
// if (diff>7)
// std::cerr << "Large diff at element " << i << ' ' << diff << ' ' << std::hexfloat
// << g.f << "!=" << c.f << "\n";
}
std::cout << "ndiff ave, max " << ndiff << ' ' << ave / numElements << ' ' << maxDiff << std::endl;
std::cout << "float ave, max " << fave / numElements << ' ' << fmaxDiff << std::endl;
if (!ndiff) {
std::cout << "Test PASSED\n";
std::cout << "SUCCESS" << std::endl;
}
hipDeviceSynchronize();
}
int main() {
cms::cudatest::requireDevices();
try {
go<USEEXP>();
go<USESIN>();
go<USELOG>();
go<USELOG, true>();
} catch (std::runtime_error &ex) {
std::cerr << "CUDA or std runtime error: " << ex.what() << std::endl;
exit(EXIT_FAILURE);
} catch (...) {
std::cerr << "A non-CUDA error occurred" << std::endl;
exit(EXIT_FAILURE);
}
return EXIT_SUCCESS;
}
|
cf7697f2aaaa17ce730552f40b84177f23a236fc.cu
|
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <[email protected]>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*
* Modified by VinInn for testing math funcs
*/
/* to run test
foreach f ( $CMSSW_BASE/test/$SCRAM_ARCH/DFM_Vector* )
echo $f; $f
end
*/
#include <algorithm>
#include <cassert>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#ifdef __CUDACC__
#define inline __host__ __device__ inline
#include <vdt/sin.h>
#undef inline
#else
#include <vdt/sin.h>
#endif
#include "DataFormats/Math/interface/approx_log.h"
#include "DataFormats/Math/interface/approx_exp.h"
#include "DataFormats/Math/interface/approx_atan2.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
#include "HeterogeneousCore/CUDAUtilities/interface/launch.h"
std::mt19937 eng;
std::mt19937 eng2;
std::uniform_real_distribution<float> rgen(0., 1.);
constexpr float myExp(float x) { return unsafe_expf<6>(x); }
constexpr float myLog(float x) { return unsafe_logf<6>(x); }
__host__ __device__ inline float mySin(float x) { return vdt::fast_sinf(x); }
constexpr int USEEXP = 0, USESIN = 1, USELOG = 2;
template <int USE, bool ADDY = false>
// __host__ __device__
constexpr float testFunc(float x, float y) {
float ret = 0;
if (USE == USEEXP)
ret = myExp(x);
else if (USE == USESIN)
ret = mySin(x);
else
ret = myLog(x);
return ADDY ? ret + y : ret;
}
template <int USE, bool ADDY>
__global__ void vectorOp(const float *A, const float *B, float *C, int numElements) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements) {
C[i] = testFunc<USE, ADDY>(A[i], B[i]);
}
}
template <int USE, bool ADDY>
void vectorOpH(const float *A, const float *B, float *C, int numElements) {
for (int i = 0; i < numElements; ++i) {
C[i] = testFunc<USE, ADDY>(A[i], B[i]);
}
}
template <int USE, bool ADDY = false>
void go() {
auto start = std::chrono::high_resolution_clock::now();
auto delta = start - start;
int numElements = 200000;
size_t size = numElements * sizeof(float);
std::cout << "[Vector of " << numElements << " elements]\n";
auto h_A = std::make_unique<float[]>(numElements);
auto h_B = std::make_unique<float[]>(numElements);
auto h_C = std::make_unique<float[]>(numElements);
auto h_C2 = std::make_unique<float[]>(numElements);
std::generate(h_A.get(), h_A.get() + numElements, [&]() { return rgen(eng); });
std::generate(h_B.get(), h_B.get() + numElements, [&]() { return rgen(eng); });
delta -= (std::chrono::high_resolution_clock::now() - start);
auto d_A = cms::cuda::make_device_unique<float[]>(numElements, nullptr);
auto d_B = cms::cuda::make_device_unique<float[]>(numElements, nullptr);
auto d_C = cms::cuda::make_device_unique<float[]>(numElements, nullptr);
cudaCheck(cudaMemcpy(d_A.get(), h_A.get(), size, cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_B.get(), h_B.get(), size, cudaMemcpyHostToDevice));
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "cuda alloc+copy took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
// Launch the Vector OP CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
std::cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads\n";
delta -= (std::chrono::high_resolution_clock::now() - start);
cms::cuda::launch(
vectorOp<USE, ADDY>, {blocksPerGrid, threadsPerBlock}, d_A.get(), d_B.get(), d_C.get(), numElements);
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
delta -= (std::chrono::high_resolution_clock::now() - start);
cms::cuda::launch(
vectorOp<USE, ADDY>, {blocksPerGrid, threadsPerBlock}, d_A.get(), d_B.get(), d_C.get(), numElements);
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
delta -= (std::chrono::high_resolution_clock::now() - start);
cudaCheck(cudaMemcpy(h_C.get(), d_C.get(), size, cudaMemcpyDeviceToHost));
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "cuda copy back took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
// on host now...
delta -= (std::chrono::high_resolution_clock::now() - start);
vectorOpH<USE, ADDY>(h_A.get(), h_B.get(), h_C2.get(), numElements);
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "host computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
delta -= (std::chrono::high_resolution_clock::now() - start);
vectorOpH<USE, ADDY>(h_A.get(), h_B.get(), h_C2.get(), numElements);
delta += (std::chrono::high_resolution_clock::now() - start);
std::cout << "host computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
// Verify that the result vector is correct
double ave = 0;
int maxDiff = 0;
long long ndiff = 0;
double fave = 0;
float fmaxDiff = 0;
for (int i = 0; i < numElements; ++i) {
approx_math::binary32 g, c;
g.f = testFunc<USE, ADDY>(h_A[i], h_B[i]);
c.f = h_C[i];
auto diff = std::abs(g.i32 - c.i32);
maxDiff = std::max(diff, maxDiff);
ave += diff;
if (diff != 0)
++ndiff;
auto fdiff = std::abs(g.f - c.f);
fave += fdiff;
fmaxDiff = std::max(fdiff, fmaxDiff);
// if (diff>7)
// std::cerr << "Large diff at element " << i << ' ' << diff << ' ' << std::hexfloat
// << g.f << "!=" << c.f << "\n";
}
std::cout << "ndiff ave, max " << ndiff << ' ' << ave / numElements << ' ' << maxDiff << std::endl;
std::cout << "float ave, max " << fave / numElements << ' ' << fmaxDiff << std::endl;
if (!ndiff) {
std::cout << "Test PASSED\n";
std::cout << "SUCCESS" << std::endl;
}
cudaDeviceSynchronize();
}
int main() {
cms::cudatest::requireDevices();
try {
go<USEEXP>();
go<USESIN>();
go<USELOG>();
go<USELOG, true>();
} catch (std::runtime_error &ex) {
std::cerr << "CUDA or std runtime error: " << ex.what() << std::endl;
exit(EXIT_FAILURE);
} catch (...) {
std::cerr << "A non-CUDA error occurred" << std::endl;
exit(EXIT_FAILURE);
}
return EXIT_SUCCESS;
}
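/*
 * Note (illustrative, not part of the test): the verification loop above measures the
 * error in units in the last place by comparing the int32 bit patterns of the two
 * floats through approx_math::binary32. A standalone sketch of that idea follows; it
 * assumes both values are finite and share the same sign, and the helper name is made up.
 */
#include <cstdint>
#include <cstring>
#include <cstdlib>
inline int ulp_distance(float a, float b) {
  int32_t ia, ib;
  std::memcpy(&ia, &a, sizeof(ia)); // reinterpret the IEEE-754 bit patterns
  std::memcpy(&ib, &b, sizeof(ib));
  return std::abs(ia - ib);         // adjacent representable floats differ by exactly 1
}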
|
ccc1c29a199bb7a6145988c11fbf48cede68e8a2.hip
|
// !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
#include <sys/mman.h>
// CUDA runtime
#include <hip/hip_runtime.h>
extern "C" {
#include "../gpusync.h"
}
// Stream for the thread's GPU Operations
hipStream_t stream;
float *h;
float *d;
extern "C" void init(int sync_level) {
/*
* The sync_level parameter is an integer that indicates the desired level of
* synchronization used by the GPU driver (values defined below). The
* specified level is used in hipSetDeviceFlags() to set the level
* prior to initialization.
*/
switch (sync_level) {
case 0:
hipSetDeviceFlags(hipDeviceScheduleSpin);
break;
case 1:
hipSetDeviceFlags(hipDeviceScheduleYield);
break;
case 2:
hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
break;
default:
fprintf(stderr, "Unknown sync level: %d\n", sync_level);
break;
}
// Zero copy enable
hipSetDeviceFlags(hipDeviceMapHost);
// Follow convention and initialize CUDA/GPU
// used here to invoke initialization of GPU locking
hipFree(0);
// Pin code
if(mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
fprintf(stderr, "Failed to lock code pages.\n");
exit(EXIT_FAILURE);
}
// Set the device context
hipSetDevice(0);
// create a user defined stream
hipStreamCreate(&stream);
}
extern "C" void mallocCPU(int numElements) {
hipError_t err = hipHostMalloc((void **) &h, numElements, hipHostMallocMapped);
if (err != hipSuccess) {
fprintf(stderr, "Failed to allocate host memory (error code %s)!\n", hipGetErrorString(err));
}
}
extern "C" void mallocGPU(int numElements) {
// Nothing to do
}
#define SPLITSIZE 8192
extern "C" void copyin(int numElements) {
// these calls are asynchronous so only the lock of CE can be handled in the wrapper
hipError_t err = hipHostGetDevicePointer((void **) &d, (void *) h, 0);
if (err != hipSuccess) {
fprintf(stderr, "Failed to copy memory from host to device (error code %s)!\n", hipGetErrorString(err));
return;
}
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
hipStreamSynchronize(stream);
}
extern "C" void exec(int numElements) {
// Nothing to do
}
extern "C" void copyout() {
// Nothing to do
}
extern "C" void freeGPU() {
// Nothing to do
}
extern "C" void freeCPU() {
// Free host memory that was pinned
hipHostFree(h);
}
extern "C" void finish() {
// clean up the user allocated stream
hipStreamSynchronize(stream);
hipStreamDestroy(stream);
// Reset the device and return
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application returns
hipError_t err = hipDeviceReset();
if (err != hipSuccess) {
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
}
}
|
ccc1c29a199bb7a6145988c11fbf48cede68e8a2.cu
|
// System includes
#include <stdio.h>
#include <assert.h>
#include <sys/mman.h>
// CUDA runtime
#include <cuda_runtime.h>
extern "C" {
#include "../gpusync.h"
}
// Stream for the thread's GPU Operations
cudaStream_t stream;
float *h;
float *d;
extern "C" void init(int sync_level) {
/*
* The sync_level parameter is an integer that indicates the desired level of
* synchronization used by the GPU driver (values defined below). The
* specified level is used in cudaSetDeviceFlags() to set the level
* prior to initialization.
*/
switch (sync_level) {
case 0:
cudaSetDeviceFlags(cudaDeviceScheduleSpin);
break;
case 1:
cudaSetDeviceFlags(cudaDeviceScheduleYield);
break;
case 2:
cudaSetDeviceFlags(cudaDeviceBlockingSync);
break;
default:
fprintf(stderr, "Unknown sync level: %d\n", sync_level);
break;
}
// Zero copy enable
cudaSetDeviceFlags(cudaDeviceMapHost);
// Follow convention and initialize CUDA/GPU
// used here to invoke initialization of GPU locking
cudaFree(0);
// Pin code
if(mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
fprintf(stderr, "Failed to lock code pages.\n");
exit(EXIT_FAILURE);
}
// Set the device context
cudaSetDevice(0);
// create a user defined stream
cudaStreamCreate(&stream);
}
extern "C" void mallocCPU(int numElements) {
cudaError_t err = cudaHostAlloc((void **) &h, numElements, cudaHostAllocMapped);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to allocate host memory (error code %s)!\n", cudaGetErrorString(err));
}
}
extern "C" void mallocGPU(int numElements) {
// Nothing to do
}
#define SPLITSIZE 8192
extern "C" void copyin(int numElements) {
// these calls are asynchronous so only the lock of CE can be handled in the wrapper
cudaError_t err = cudaHostGetDevicePointer((void **) &d, (void *) h, 0);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy memory from host to device (error code %s)!\n", cudaGetErrorString(err));
return;
}
// synchronize with the stream
// the wrapper for this function releases any lock held (CE here)
cudaStreamSynchronize(stream);
}
extern "C" void exec(int numElements) {
// Nothing to do
}
extern "C" void copyout() {
// Nothing to do
}
extern "C" void freeGPU() {
// Nothing to do
}
extern "C" void freeCPU() {
// Free host memory that was pinned
cudaFreeHost(h);
}
extern "C" void finish() {
// clean up the user allocated stream
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
// Reset the device and return
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application returns
cudaError_t err = cudaDeviceReset();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
}
}
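// -----------------------------------------------------------------------------
// The benchmark harness that drives these extern "C" hooks lives outside this
// file. The sketch below (kept inside #if 0 so this translation unit is not
// altered) is a minimal, hypothetical caller showing the intended call order;
// the sync level and the buffer size in bytes are placeholder values.
#if 0
int main(void) {
  const int numElements = 1 << 20; // assumed buffer size in bytes
  init(0);                 // 0 = spin-wait sync level
  mallocCPU(numElements);  // pinned, mapped host allocation
  mallocGPU(numElements);  // no-op in this zero-copy variant
  copyin(numElements);     // resolves the device pointer for the mapped buffer
  exec(numElements);       // no-op here; kernels would be launched on 'stream'
  copyout();
  freeGPU();
  freeCPU();
  finish();
  return 0;
}
#endif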
|
093548cc62d7484322195e26f92a9d50a2e536e0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#include <hip/hip_runtime.h>
/****************NTHREADS****************/
#define T1 512
#define T2 512
#define T3 128
#define T4 64
#define T5 256
#define T6 1024
/*****************BLOCK COUNT**************/
#define F1 3
#define F2 3
#define F3 6
#define F4 6
#define F5 5
#define F6 1
#define WARPSIZE 32
#define MAXDEPTH 32
__device__ int d_step,d_bottom,d_maxdepth;
__device__ unsigned int d_blkcnt;
__device__ float d_radius;
/*************INITIALIZATION***************/
__global__ void InitializationKernel()
{
d_step = -1;
d_maxdepth = 1;
d_blkcnt = 0;
//printf("DEBUG---------> InitializationKernel");
}
/****************BOUNDING BOX KERNEL************************/
__global__ void BoundingBoxKernel(int d_nnodes, int d_nbodies, volatile int * __restrict d_start,
volatile int * __restrict d_child, volatile float * __restrict d_mass, volatile float * __restrict d_posx,
volatile float * __restrict d_posy, volatile float * __restrict d_posz, volatile float * __restrict d_maxx,
volatile float * __restrict d_maxy, volatile float * __restrict d_maxz, volatile float * __restrict d_minx,
volatile float * __restrict d_miny, volatile float * __restrict d_minz)
{
register int i,j,k,l;
register float val,minx,maxx,miny,maxy,minz,maxz;
__shared__ volatile float sminx[T1],sminy[T1],sminz[T1],smaxx[T1],smaxy[T1],smaxz[T1];
//initial min and max for x,y,z for each thread
minx = maxx = d_posx[0];
miny = maxy = d_posy[0];
minz = maxz = d_posz[0];
//go through all the bodies
//printf("minx,miny,minz %d ,%d, %d",minx,miny,minz);
i=threadIdx.x;
l=T1*gridDim.x;
for (j = i + blockIdx.x * T1; j < d_nbodies; j += l)
{
val = d_posx[j];
minx = fminf(minx, val);
maxx = fmaxf(maxx, val);
val = d_posy[j];
miny = fminf(miny, val);
maxy = fmaxf(maxy, val);
val = d_posz[j];
minz = fminf(minz, val);
maxz = fmaxf(maxz, val);
}
//reduction of each thread in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
//do a binary sort and find a minimum in a block
for(j = T1/2; j > 0 ; j /= 2)
{
__syncthreads();
if(i < j)
{
k = i + j;
sminx[i] = minx = fminf(minx,sminx[k]);
sminy[i] = miny = fminf(miny,sminy[k]);
sminz[i] = minz = fminf(minz,sminz[k]);
smaxx[i] = maxx = fmaxf(maxx,smaxx[k]);
smaxy[i] = maxy = fmaxf(maxy,smaxy[k]);
smaxz[i] = maxz = fmaxf(maxz,smaxz[k]);
}
}
//write to global memory for each block the min and max positions
if(i == 0)
{
k = blockIdx.x;
d_minx[k] = minx;
d_miny[k] = miny;
d_minz[k] = minz;
d_maxx[k] = maxx;
d_maxy[k] = maxy;
d_maxz[k] = maxz;
__threadfence();
l = gridDim.x - 1;
if( l == atomicInc(&d_blkcnt,l))
{
for(j = 0 ;j<= l; j++)
{
minx = fminf(minx,d_minx[j]);
miny = fminf(miny,d_miny[j]);
minz = fminf(minz,d_minz[j]);
maxx = fmaxf(maxx,d_maxx[j]);
maxy = fmaxf(maxy,d_maxy[j]);
maxz = fmaxf(maxz,d_maxz[j]);
}
val = fmaxf (maxx - minx, maxy - miny);
d_radius = fmaxf(val,maxz-minz) * 0.5f;
k=d_nnodes;
d_bottom = k;
d_mass[k] = -1.0f;
d_start[k] = 0;
d_posx[k] = (minx + maxx) * 0.5f;
d_posy[k] = (miny + maxy) * 0.5f;
d_posz[k] = (minz + maxz) * 0.5f;
k *= 8;
for(i = 0; i < 8; i++)
d_child[k+i] = -1;
d_step = d_step + 1;
}
}
}
/************END OF BOUNDING BOX KERNEL*******************/
__global__ void InitializationKernel1(int d_nodes,int d_nbodies,volatile int * __restrict d_child)
{
//Initialize for Tree Building Kernel
register int k,l,top,bottom;
top = 8 * d_nodes;
bottom = 8 * d_nbodies;
l = blockDim.x *gridDim.x;
//align threads to warpsize for memory coalescing
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x;
if(k < bottom)
k += l;
while(k<top)
{
d_child[k] = -1;
k+=l;
}
}
/***********************Building the Tree kernel******************************************/
__global__ void TreeBuildingKernel(int nnodesd, int nbodiesd, volatile int * __restrict childd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, depth, localmaxdepth, exit, l;
register float x, y, z, r;
register float px, py, pz;
register float dx, dy, dz;
register int ch, n, cell, locked, p;
register float radius, rootx, rooty, rootz;
// cache root data
radius = d_radius;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
exit = 1;
l = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
//printf("DEBUG---------------> TREE BUILDING KERNEL - 1, i am here 1" );
if (exit != 0) {
// new body, so start traversing at root
exit = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius * 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (rootx < px) {j = 1; dx = r;}
if (rooty < py) {j |= 2; dy = r;}
if (rootz < pz) {j |= 4; dz = r;}
x = rootx + dx;
y = rooty + dy;
z = rootz + dz;
}
//printf("DEBUG---------------> TREE BUILDING KERNEL - 2" );
//printf("x,y,z %d, %d, %d",x,y,z);
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
//if not null create space
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == -1) {
if (-1 == atomicCAS((int *)&childd[locked], -1, i)) { // if null, just insert the new body
localmaxdepth = max(depth, localmaxdepth);
i += l; // move on to next body
exit = 1;
}
} else { // there already is a body in this position
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
p = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&d_bottom, 1) - 1;
//printf("DEBUG---------------> TREE BUILDING KERNEL - 3, i am here 1" );
if (p != -1) {
childd[n*8+j] = cell;
}
p = max(p, cell);
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j |= 2;
if (z < poszd[ch]) j |= 4;
childd[cell*8+j] = ch;
n = cell;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
localmaxdepth = max(depth, localmaxdepth);
i += l; // move on to next body
exit = 2;
}
}
}
__syncthreads(); // __threadfence();
if (exit == 2) {
childd[locked] = p;
}
}
// record maximum tree depth
atomicMax((int *)&d_maxdepth, localmaxdepth);
}
__global__ void InitializationKernel2(int nnodesd, volatile int * __restrict startd, volatile float * __restrict massd)
{
int k, l, bottom;
bottom = d_bottom;
l = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += l;
// iterate over all cells assigned to thread
while (k < nnodesd) {
massd[k] = -1.0f;
startd[k] = -1;
k += l;
}
}
/**************************Compute Centre of gravity**************************************/
__global__ void CoGKernel(const int d_nnodes,const int d_nbodies,volatile int * __restrict d_count,
const int * __restrict d_child,volatile float * __restrict d_mass,volatile float * __restrict d_posx,
volatile float * __restrict d_posy,volatile float * __restrict d_posz)
{
int i,j,k,ch,l,count,bottom,flag,restart;
float m,cm,px,py,pz;
__shared__ int child[T3*8];
__shared__ float mass[T3*8];
bottom = d_bottom;
l = blockDim.x * gridDim.x;
//align threads to warpsize
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x*blockDim.x;
int tid = threadIdx.x;
if(k <bottom)
{
k = k+l;
}
restart = k;
//caching the children into shared memory
//keep trying for 5 times without waiting
for(j=0;j<5;j++)
{
while(k < d_nnodes)
{
//Allocation order used ensures that cell's children have a lower array indices than the cell
//therefore it is not possible that a thread will ever attempt to process a cell before it has
//processed its children
//this prevents deadlock
if(d_mass[k] < 0.0f)
{
//iterate for all children
//k represents leaf node
for(i=0;i<8;i++)
{
/*Because the majority of cells in the octree have only bodies as children, the corresponding
threads can immediately compute the cell data. */
ch = d_child[k*8+i];
/****Write into the cache****/
//cache all the child pointers of the current cell in shared memory that point to not ready
//children. This way the thread polls only missing children which reduces global accesses.
child[i*T3 + tid] = ch;
if(ch >= d_nbodies && ((mass[i*T3+tid] = d_mass[ch]) <0.0f))
{
break;
}
}
//all children are ready
if(i==8)
{
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
count = 0;
//scan all threads
for(i=0;i<8;i++)
{
ch = child[i*T3 + tid];
if(ch >= 0)
{
//count the bodies in all subtrees and store this information in the cells
if(ch >= d_nbodies)
{
m = mass[i*T3+tid];
count += d_count[ch];
}
else
{
m = d_mass[ch];
count++;
}
//add child's contribution
//printf("%d",ch);
cm = cm + m;
px = px + d_posx[ch] * m;
py = py + d_posy[ch] * m;
pz = pz + d_posz[ch] * m;
}
}
//printf("%d",px);
d_count[k] = count;
m = 1.0f/cm;
d_posx[k] = px *m;
d_posy[k] = py *m;
d_posz[k] = pz *m;
__threadfence(); //to ensure data is visible before setting mass
d_mass[k] = cm;
}
}
k += l;
}
k = restart;
}
flag = 0;
/*extra operation :move all non-null child pointers to the front */
j=0;
//iterate over all the cells assigned to thread
while( k < d_nnodes)
{
if(d_mass[k] >= 0.0f)
{
k+=l;
}
else
{
if(j==0)
{
j = 8;
for(i=0;i<8;i++)
{
ch = d_child[k*8+i];
child[i*T3 + tid] = ch;
if((ch < d_nbodies) || ((mass[i*T3 + tid]=d_mass[ch]) >= 0.0f))
{
j--;
}
}
}
else
{
j=8;
for(i=0;i<8;i++)
{
ch = child[i*T3 + tid];
        if((ch < d_nbodies) || ((mass[i*T3 + tid]=d_mass[ch]) >= 0.0f)) // parenthesized assignment: cache the actual mass, not a 0/1 comparison result
{
j--;
}
}
}
if(j==0)
{
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
count = 0;
for(i=0;i<8;i++)
{
ch = child[i*T3 + tid];
if(ch >= 0)
{
if(ch >= d_nbodies)
{
m = mass[i*T3+tid];
count += d_count[ch];
}
else
{
m = d_mass[ch];
count++;
}
//add child's contribution
cm = cm + m;
px = px + d_posx[ch] * m;
py = py + d_posy[ch] * m;
pz = pz + d_posz[ch] * m;
}
}
d_count[k] = count;
m = 1.0f/cm;
d_posx[k] = px *m;
d_posy[k] = py *m;
d_posz[k] = pz *m;
flag = 1;
}
}
__syncthreads();
if(flag!=0)
{
d_mass[k] = cm;
k+=l;
flag=0;
}
}
}
/**********************END OF CoGKernel****************************/
/**********************SORT KERNEL**********************************/
__global__ void SortKernel(int d_nnodes, int d_nbodies, int * d_sort, int * d_count, volatile int * d_start, int * d_child)
{
int i, j, k, ch, l, start, b;
b = d_bottom;
l = blockDim.x * gridDim.x;
k = d_nnodes + 1 - l + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= b) {
start = d_start[k];
if (start >= 0) {
j = 0;
for (i = 0; i < 8; i++) {
ch = d_child[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front
d_child[k*8+i] = -1;
d_child[k*8+j] = ch;
}
j++;
if (ch >= d_nbodies) {
// child is a cell
d_start[ch] = start; // set start ID of child
start += d_count[ch]; // add #bodies in subtree
} else {
// child is a body
d_sort[start] = ch; // record body in 'sorted' array
start++;
}
}
}
k -= l; // move on to next cell
}
}
}
/************************END OF SORT KERNEL*************************/
/************************Force Calculation kernel*********************/
/************************END OF FORCE CALCULATION KERNEL**********************/
__global__ void ForceKernel(int d_nnodes, int d_nbodies, float dt, float d_itsq, float epsilon,volatile int* __restrict d_sort,volatile int* __restrict d_child,volatile float* __restrict d_mass,volatile float* __restrict d_posx,volatile float* __restrict d_posy,volatile float* __restrict d_posz,volatile float* __restrict d_velx,volatile float* __restrict d_vely,volatile float* __restrict d_velz,volatile float* __restrict d_accx,volatile float* __restrict d_accy,volatile float* __restrict d_accz)
{
register int i,j,k,n,depth,base,sbase,i_delta,pd,nd;
register float tempx, tempy, tempz, ax, ay, az, dx, dy, dz, temp;
__shared__ volatile int pos[MAXDEPTH*T5/WARPSIZE], node[MAXDEPTH*T5/WARPSIZE];
__shared__ float interm[MAXDEPTH*T5/WARPSIZE];
if(threadIdx.x == 0)
{
temp = d_radius *2;
interm[0] = temp*temp*d_itsq;
for(i=1;i<d_maxdepth;i++)
{
interm[i]=interm[i-1]*0.25f;
interm[i-1] +=epsilon;
}
interm[i-1] +=epsilon;
}
//SYNCTHREADS -----------> 12/03/2014
__syncthreads();
if(d_maxdepth<=MAXDEPTH)
{
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
i_delta = threadIdx.x - sbase;
if(i_delta<MAXDEPTH)
{
interm[i_delta+j] = interm[i_delta];
}
__syncthreads();
__threadfence_block();
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < d_nbodies; k += blockDim.x * gridDim.x) {
i=d_sort[k];
tempx=d_posx[i];
tempy=d_posy[i];
tempz=d_posz[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
depth =j;
if(sbase == threadIdx.x)
{
pos[j] = 0;
node[j] = d_nnodes * 8;
}
do {
pd = pos[depth];
nd = node[depth];
while (pd < 8) {
n = d_child[nd + pd]; // load child pointer
pd++;
if (n >= 0) {
dx = d_posx[n] - tempx;
dy = d_posy[n] - tempy;
dz = d_posz[n] - tempz;
temp = dx*dx + (dy*dy + (dz*dz + epsilon));
if ((n < d_nbodies) || __all(temp >= interm[depth])) { temp = rsqrt(temp);
temp = d_mass[n]*temp*temp*temp;
ax += dx * temp;
ay += dy * temp;
az += dz * temp;
} else {
if (sbase == threadIdx.x) {
pos[depth] = pd;
node[depth] = nd;
}
depth++;
pd=0;
nd=n*8;
}
} else {
pd = 8; // early out
}
}
depth--; // done with this level
} while (depth >= j);
if (d_step > 0) {
        d_velx[i] += (ax-d_accx[i])*dt;
        d_vely[i] += (ay-d_accy[i])*dt;
        d_velz[i] += (az-d_accz[i])*dt;
}
d_accx[i] = ax;
d_accy[i] = ay;
d_accz[i] = az;
}
}
}
/*****************************UPDATE VELOCITY AND POSITION KERNEL****************/
__global__ void UpdateKernel(int d_nbodies, float d_dtime, float d_dthf, float * d_posx, float * d_posy,
float * d_posz, float * dvelx, float * dvely, float * dvelz,
float * d_accx, float * d_accy, float * d_accz)
{
int i, l;
float d_velx, d_vely, d_velz;
float velhx, velhy, velhz;
l = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < d_nbodies; i += l) {
d_velx = d_accx[i] * d_dthf;
d_vely = d_accy[i] * d_dthf;
d_velz = d_accz[i] * d_dthf;
velhx = dvelx[i] + d_velx;
velhy = dvely[i] + d_vely;
velhz = dvelz[i] + d_velz;
d_posx[i] += velhx * d_dtime;
d_posy[i] += velhy * d_dtime;
d_posz[i] += velhz * d_dtime;
dvelx[i] = velhx + d_velx;
dvely[i] = velhy + d_vely;
dvelz[i] = velhz + d_velz;
}
}
|
093548cc62d7484322195e26f92a9d50a2e536e0.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#include <cuda.h>
/****************NTHREADS****************/
#define T1 512
#define T2 512
#define T3 128
#define T4 64
#define T5 256
#define T6 1024
/*****************BLOCK COUNT**************/
#define F1 3
#define F2 3
#define F3 6
#define F4 6
#define F5 5
#define F6 1
#define WARPSIZE 32
#define MAXDEPTH 32
__device__ int d_step,d_bottom,d_maxdepth;
__device__ unsigned int d_blkcnt;
__device__ float d_radius;
/*************INITIALIZATION***************/
__global__ void InitializationKernel()
{
d_step = -1;
d_maxdepth = 1;
d_blkcnt = 0;
//printf("DEBUG---------> InitializationKernel");
}
/****************BOUNDING BOX KERNEL************************/
__global__ void BoundingBoxKernel(int d_nnodes, int d_nbodies, volatile int * __restrict d_start,
volatile int * __restrict d_child, volatile float * __restrict d_mass, volatile float * __restrict d_posx,
volatile float * __restrict d_posy, volatile float * __restrict d_posz, volatile float * __restrict d_maxx,
volatile float * __restrict d_maxy, volatile float * __restrict d_maxz, volatile float * __restrict d_minx,
volatile float * __restrict d_miny, volatile float * __restrict d_minz)
{
register int i,j,k,l;
register float val,minx,maxx,miny,maxy,minz,maxz;
__shared__ volatile float sminx[T1],sminy[T1],sminz[T1],smaxx[T1],smaxy[T1],smaxz[T1];
//initial min and max for x,y,z for each thread
minx = maxx = d_posx[0];
miny = maxy = d_posy[0];
minz = maxz = d_posz[0];
//go through all the bodies
//printf("minx,miny,minz %d ,%d, %d",minx,miny,minz);
i=threadIdx.x;
l=T1*gridDim.x;
for (j = i + blockIdx.x * T1; j < d_nbodies; j += l)
{
val = d_posx[j];
minx = fminf(minx, val);
maxx = fmaxf(maxx, val);
val = d_posy[j];
miny = fminf(miny, val);
maxy = fmaxf(maxy, val);
val = d_posz[j];
minz = fminf(minz, val);
maxz = fmaxf(maxz, val);
}
//reduction of each thread in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
//do a binary sort and find a minimum in a block
for(j = T1/2; j > 0 ; j /= 2)
{
__syncthreads();
if(i < j)
{
k = i + j;
sminx[i] = minx = fminf(minx,sminx[k]);
sminy[i] = miny = fminf(miny,sminy[k]);
sminz[i] = minz = fminf(minz,sminz[k]);
smaxx[i] = maxx = fmaxf(maxx,smaxx[k]);
smaxy[i] = maxy = fmaxf(maxy,smaxy[k]);
smaxz[i] = maxz = fmaxf(maxz,smaxz[k]);
}
}
//write to global memory for each block the min and max positions
if(i == 0)
{
k = blockIdx.x;
d_minx[k] = minx;
d_miny[k] = miny;
d_minz[k] = minz;
d_maxx[k] = maxx;
d_maxy[k] = maxy;
d_maxz[k] = maxz;
__threadfence();
l = gridDim.x - 1;
if( l == atomicInc(&d_blkcnt,l))
{
for(j = 0 ;j<= l; j++)
{
minx = fminf(minx,d_minx[j]);
miny = fminf(miny,d_miny[j]);
minz = fminf(minz,d_minz[j]);
maxx = fmaxf(maxx,d_maxx[j]);
maxy = fmaxf(maxy,d_maxy[j]);
maxz = fmaxf(maxz,d_maxz[j]);
}
val = fmaxf (maxx - minx, maxy - miny);
d_radius = fmaxf(val,maxz-minz) * 0.5f;
k=d_nnodes;
d_bottom = k;
d_mass[k] = -1.0f;
d_start[k] = 0;
d_posx[k] = (minx + maxx) * 0.5f;
d_posy[k] = (miny + maxy) * 0.5f;
d_posz[k] = (minz + maxz) * 0.5f;
k *= 8;
for(i = 0; i < 8; i++)
d_child[k+i] = -1;
d_step = d_step + 1;
}
}
}
/************END OF BOUNDING BOX KERNEL*******************/
__global__ void InitializationKernel1(int d_nodes,int d_nbodies,volatile int * __restrict d_child)
{
//Initialize for Tree Building Kernel
register int k,l,top,bottom;
top = 8 * d_nodes;
bottom = 8 * d_nbodies;
l = blockDim.x *gridDim.x;
//align threads to warpsize for memory coalescing
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x;
if(k < bottom)
k += l;
while(k<top)
{
d_child[k] = -1;
k+=l;
}
}
/***********************Building the Tree kernel******************************************/
__global__ void TreeBuildingKernel(int nnodesd, int nbodiesd, volatile int * __restrict childd, volatile float * __restrict posxd, volatile float * __restrict posyd, volatile float * __restrict poszd)
{
register int i, j, depth, localmaxdepth, exit, l;
register float x, y, z, r;
register float px, py, pz;
register float dx, dy, dz;
register int ch, n, cell, locked, p;
register float radius, rootx, rooty, rootz;
// cache root data
radius = d_radius;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
exit = 1;
l = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
//printf("DEBUG---------------> TREE BUILDING KERNEL - 1, i am here 1" );
if (exit != 0) {
// new body, so start traversing at root
exit = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius * 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
if (rootx < px) {j = 1; dx = r;}
if (rooty < py) {j |= 2; dy = r;}
if (rootz < pz) {j |= 4; dz = r;}
x = rootx + dx;
y = rooty + dy;
z = rootz + dz;
}
//printf("DEBUG---------------> TREE BUILDING KERNEL - 2" );
//printf("x,y,z %d, %d, %d",x,y,z);
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
// determine which child to follow
//if not null create space
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == -1) {
if (-1 == atomicCAS((int *)&childd[locked], -1, i)) { // if null, just insert the new body
localmaxdepth = max(depth, localmaxdepth);
i += l; // move on to next body
exit = 1;
}
} else { // there already is a body in this position
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
p = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&d_bottom, 1) - 1;
//printf("DEBUG---------------> TREE BUILDING KERNEL - 3, i am here 1" );
if (p != -1) {
childd[n*8+j] = cell;
}
p = max(p, cell);
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j |= 2;
if (z < poszd[ch]) j |= 4;
childd[cell*8+j] = ch;
n = cell;
r *= 0.5f;
dx = dy = dz = -r;
j = 0;
if (x < px) {j = 1; dx = r;}
if (y < py) {j |= 2; dy = r;}
if (z < pz) {j |= 4; dz = r;}
x += dx;
y += dy;
z += dz;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
localmaxdepth = max(depth, localmaxdepth);
i += l; // move on to next body
exit = 2;
}
}
}
__syncthreads(); // __threadfence();
if (exit == 2) {
childd[locked] = p;
}
}
// record maximum tree depth
atomicMax((int *)&d_maxdepth, localmaxdepth);
}
__global__ void InitializationKernel2(int nnodesd, volatile int * __restrict startd, volatile float * __restrict massd)
{
int k, l, bottom;
bottom = d_bottom;
l = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += l;
// iterate over all cells assigned to thread
while (k < nnodesd) {
massd[k] = -1.0f;
startd[k] = -1;
k += l;
}
}
/**************************Compute Centre of gravity**************************************/
__global__ void CoGKernel(const int d_nnodes,const int d_nbodies,volatile int * __restrict d_count,
const int * __restrict d_child,volatile float * __restrict d_mass,volatile float * __restrict d_posx,
volatile float * __restrict d_posy,volatile float * __restrict d_posz)
{
int i,j,k,ch,l,count,bottom,flag,restart;
float m,cm,px,py,pz;
__shared__ int child[T3*8];
__shared__ float mass[T3*8];
bottom = d_bottom;
l = blockDim.x * gridDim.x;
//align threads to warpsize
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x*blockDim.x;
int tid = threadIdx.x;
if(k <bottom)
{
k = k+l;
}
restart = k;
//caching the children into shared memory
//keep trying for 5 times without waiting
for(j=0;j<5;j++)
{
while(k < d_nnodes)
{
//Allocation order used ensures that cell's children have a lower array indices than the cell
//therefore it is not possible that a thread will ever attempt to process a cell before it has
//processed its children
//this prevents deadlock
if(d_mass[k] < 0.0f)
{
//iterate for all children
//k represents leaf node
for(i=0;i<8;i++)
{
/*Because the majority of cells in the octree have only bodies as children, the corresponding
threads can immediately compute the cell data. */
ch = d_child[k*8+i];
/****Write into the cache****/
//cache all the child pointers of the current cell in shared memory that point to not ready
//children. This way the thread polls only missing children which reduces global accesses.
child[i*T3 + tid] = ch;
if(ch >= d_nbodies && ((mass[i*T3+tid] = d_mass[ch]) <0.0f))
{
break;
}
}
//all children are ready
if(i==8)
{
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
count = 0;
//scan all threads
for(i=0;i<8;i++)
{
ch = child[i*T3 + tid];
if(ch >= 0)
{
//count the bodies in all subtrees and store this information in the cells
if(ch >= d_nbodies)
{
m = mass[i*T3+tid];
count += d_count[ch];
}
else
{
m = d_mass[ch];
count++;
}
//add child's contribution
//printf("%d",ch);
cm = cm + m;
px = px + d_posx[ch] * m;
py = py + d_posy[ch] * m;
pz = pz + d_posz[ch] * m;
}
}
//printf("%d",px);
d_count[k] = count;
m = 1.0f/cm;
d_posx[k] = px *m;
d_posy[k] = py *m;
d_posz[k] = pz *m;
__threadfence(); //to ensure data is visible before setting mass
d_mass[k] = cm;
}
}
k += l;
}
k = restart;
}
flag = 0;
/*extra operation :move all non-null child pointers to the front */
j=0;
//iterate over all the cells assigned to thread
while( k < d_nnodes)
{
if(d_mass[k] >= 0.0f)
{
k+=l;
}
else
{
if(j==0)
{
j = 8;
for(i=0;i<8;i++)
{
ch = d_child[k*8+i];
child[i*T3 + tid] = ch;
if((ch < d_nbodies) || ((mass[i*T3 + tid]=d_mass[ch]) >= 0.0f))
{
j--;
}
}
}
else
{
j=8;
for(i=0;i<8;i++)
{
ch = child[i*T3 + tid];
        if((ch < d_nbodies) || ((mass[i*T3 + tid]=d_mass[ch]) >= 0.0f)) // parenthesized assignment: cache the actual mass, not a 0/1 comparison result
{
j--;
}
}
}
if(j==0)
{
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
count = 0;
for(i=0;i<8;i++)
{
ch = child[i*T3 + tid];
if(ch >= 0)
{
if(ch >= d_nbodies)
{
m = mass[i*T3+tid];
count += d_count[ch];
}
else
{
m = d_mass[ch];
count++;
}
//add child's contribution
cm = cm + m;
px = px + d_posx[ch] * m;
py = py + d_posy[ch] * m;
pz = pz + d_posz[ch] * m;
}
}
d_count[k] = count;
m = 1.0f/cm;
d_posx[k] = px *m;
d_posy[k] = py *m;
d_posz[k] = pz *m;
flag = 1;
}
}
__syncthreads();
if(flag!=0)
{
d_mass[k] = cm;
k+=l;
flag=0;
}
}
}
/**********************END OF CoGKernel****************************/
/**********************SORT KERNEL**********************************/
__global__ void SortKernel(int d_nnodes, int d_nbodies, int * d_sort, int * d_count, volatile int * d_start, int * d_child)
{
int i, j, k, ch, l, start, b;
b = d_bottom;
l = blockDim.x * gridDim.x;
k = d_nnodes + 1 - l + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= b) {
start = d_start[k];
if (start >= 0) {
j = 0;
for (i = 0; i < 8; i++) {
ch = d_child[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front
d_child[k*8+i] = -1;
d_child[k*8+j] = ch;
}
j++;
if (ch >= d_nbodies) {
// child is a cell
d_start[ch] = start; // set start ID of child
start += d_count[ch]; // add #bodies in subtree
} else {
// child is a body
d_sort[start] = ch; // record body in 'sorted' array
start++;
}
}
}
k -= l; // move on to next cell
}
}
}
/************************END OF SORT KERNEL*************************/
/************************Force Calculation kernel*********************/
/************************END OF FORCE CALCULATION KERNEL**********************/
__global__ void ForceKernel(int d_nnodes, int d_nbodies, float dt, float d_itsq, float epsilon,volatile int* __restrict d_sort,volatile int* __restrict d_child,volatile float* __restrict d_mass,volatile float* __restrict d_posx,volatile float* __restrict d_posy,volatile float* __restrict d_posz,volatile float* __restrict d_velx,volatile float* __restrict d_vely,volatile float* __restrict d_velz,volatile float* __restrict d_accx,volatile float* __restrict d_accy,volatile float* __restrict d_accz)
{
register int i,j,k,n,depth,base,sbase,i_delta,pd,nd;
register float tempx, tempy, tempz, ax, ay, az, dx, dy, dz, temp;
__shared__ volatile int pos[MAXDEPTH*T5/WARPSIZE], node[MAXDEPTH*T5/WARPSIZE];
__shared__ float interm[MAXDEPTH*T5/WARPSIZE];
if(threadIdx.x == 0)
{
temp = d_radius *2;
interm[0] = temp*temp*d_itsq;
for(i=1;i<d_maxdepth;i++)
{
interm[i]=interm[i-1]*0.25f;
interm[i-1] +=epsilon;
}
interm[i-1] +=epsilon;
}
//SYNCTHREADS -----------> 12/03/2014
__syncthreads();
if(d_maxdepth<=MAXDEPTH)
{
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
i_delta = threadIdx.x - sbase;
if(i_delta<MAXDEPTH)
{
interm[i_delta+j] = interm[i_delta];
}
__syncthreads();
__threadfence_block();
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < d_nbodies; k += blockDim.x * gridDim.x) {
i=d_sort[k];
tempx=d_posx[i];
tempy=d_posy[i];
tempz=d_posz[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
depth =j;
if(sbase == threadIdx.x)
{
pos[j] = 0;
node[j] = d_nnodes * 8;
}
do {
pd = pos[depth];
nd = node[depth];
while (pd < 8) {
n = d_child[nd + pd]; // load child pointer
pd++;
if (n >= 0) {
dx = d_posx[n] - tempx;
dy = d_posy[n] - tempy;
dz = d_posz[n] - tempz;
temp = dx*dx + (dy*dy + (dz*dz + epsilon));
if ((n < d_nbodies) || __all(temp >= interm[depth])) { temp = rsqrt(temp);
temp = d_mass[n]*temp*temp*temp;
ax += dx * temp;
ay += dy * temp;
az += dz * temp;
} else {
if (sbase == threadIdx.x) {
pos[depth] = pd;
node[depth] = nd;
}
depth++;
pd=0;
nd=n*8;
}
} else {
pd = 8; // early out
}
}
depth--; // done with this level
} while (depth >= j);
if (d_step > 0) {
        d_velx[i] += (ax-d_accx[i])*dt;
        d_vely[i] += (ay-d_accy[i])*dt;
        d_velz[i] += (az-d_accz[i])*dt;
}
d_accx[i] = ax;
d_accy[i] = ay;
d_accz[i] = az;
}
}
}
/*****************************UPDATE VELOCITY AND POSITION KERNEL****************/
__global__ void UpdateKernel(int d_nbodies, float d_dtime, float d_dthf, float * d_posx, float * d_posy,
float * d_posz, float * dvelx, float * dvely, float * dvelz,
float * d_accx, float * d_accy, float * d_accz)
{
int i, l;
float d_velx, d_vely, d_velz;
float velhx, velhy, velhz;
l = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < d_nbodies; i += l) {
d_velx = d_accx[i] * d_dthf;
d_vely = d_accy[i] * d_dthf;
d_velz = d_accz[i] * d_dthf;
velhx = dvelx[i] + d_velx;
velhy = dvely[i] + d_vely;
velhz = dvelz[i] + d_velz;
d_posx[i] += velhx * d_dtime;
d_posy[i] += velhy * d_dtime;
d_posz[i] += velhz * d_dtime;
dvelx[i] = velhx + d_velx;
dvely[i] = velhy + d_vely;
dvelz[i] = velhz + d_velz;
}
}
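// -----------------------------------------------------------------------------
// No host driver is included in this file. The sketch below (kept inside #if 0
// so it does not change what this translation unit compiles) shows one
// plausible launch sequence for a single time step. The pairing of the T/F
// macros with each kernel, the buffer names, and the physics parameters
// (dt, dthf, itsq, epsilon) are assumptions for illustration only; grid sizes
// would normally also be scaled by the number of SMs on the target GPU. All
// device buffers are assumed to be already allocated (cells indexed up to
// nnodes, children as 8*(nnodes+1) ints, per-block min/max arrays, etc.).
#if 0
void runTimestepSketch(int nnodes, int nbodies,
                       int *d_start, int *d_child, int *d_count, int *d_sort,
                       float *d_mass, float *d_posx, float *d_posy, float *d_posz,
                       float *d_velx, float *d_vely, float *d_velz,
                       float *d_accx, float *d_accy, float *d_accz,
                       float *d_maxx, float *d_maxy, float *d_maxz,
                       float *d_minx, float *d_miny, float *d_minz,
                       float dt, float dthf, float itsq, float epsilon)
{
    InitializationKernel<<<1, 1>>>();
    BoundingBoxKernel<<<F1, T1>>>(nnodes, nbodies, d_start, d_child, d_mass,
                                  d_posx, d_posy, d_posz, d_maxx, d_maxy, d_maxz,
                                  d_minx, d_miny, d_minz);
    InitializationKernel1<<<F2, T2>>>(nnodes, nbodies, d_child);
    TreeBuildingKernel<<<F2, T2>>>(nnodes, nbodies, d_child, d_posx, d_posy, d_posz);
    InitializationKernel2<<<F3, T3>>>(nnodes, d_start, d_mass);
    CoGKernel<<<F3, T3>>>(nnodes, nbodies, d_count, d_child, d_mass,
                          d_posx, d_posy, d_posz);
    SortKernel<<<F4, T4>>>(nnodes, nbodies, d_sort, d_count, d_start, d_child);
    ForceKernel<<<F5, T5>>>(nnodes, nbodies, dt, itsq, epsilon, d_sort, d_child,
                            d_mass, d_posx, d_posy, d_posz, d_velx, d_vely, d_velz,
                            d_accx, d_accy, d_accz);
    UpdateKernel<<<F6, T6>>>(nbodies, dt, dthf, d_posx, d_posy, d_posz,
                             d_velx, d_vely, d_velz, d_accx, d_accy, d_accz);
    cudaDeviceSynchronize();
}
#endif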
|
8ee1bddaec4d87ddbf16bc1ffe24ce0f07585efb.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_unary_op_trig.cu
 * \brief GPU Implementation of unary trigonometric functions.
*/
#include "./elemwise_binary_op.h"
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(relu)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>);
NNVM_REGISTER_OP(_backward_relu)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::relu_grad>>);
NNVM_REGISTER_OP(sigmoid)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>);
NNVM_REGISTER_OP(_backward_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sigmoid_grad>>);
NNVM_REGISTER_OP(hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>);
NNVM_REGISTER_OP(_backward_hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>);
// softsign
NNVM_REGISTER_OP(softsign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>);
NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::softsign_grad>>);
// copy
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(BlockGrad)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(make_loss)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
// identity output as first input, but attributes are constrained to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>);
NNVM_REGISTER_OP(reshape_like)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
void ShapeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
hipMemcpyAsync(out_data.dptr_,
in_data.shape_.data(),
in_data.ndim() * sizeof(int64_t),
hipMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(shape_array)
.set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU);
void SizeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
const index_t size_var = in_data.Size();
hipMemcpyAsync(out_data.dptr_,
&size_var,
1U * sizeof(int64_t),
hipMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(size_array)
.set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU);
NNVM_REGISTER_OP(Cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
NNVM_REGISTER_OP(_backward_cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
// negative
NNVM_REGISTER_OP(negative)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>);
// reciprocal
NNVM_REGISTER_OP(reciprocal)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal>);
NNVM_REGISTER_OP(_backward_reciprocal)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::reciprocal_grad> >);
// abs
NNVM_REGISTER_OP(abs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>);
NNVM_REGISTER_OP(_backward_abs)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >);
// sign
NNVM_REGISTER_OP(sign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>);
NNVM_REGISTER_OP(_backward_sign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sign_grad> >);
// round
NNVM_REGISTER_OP(round)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>);
// ceil
NNVM_REGISTER_OP(ceil)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>);
// floor
NNVM_REGISTER_OP(floor)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>);
// trunc
NNVM_REGISTER_OP(trunc)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>);
// rint
NNVM_REGISTER_OP(rint)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>);
// fix
NNVM_REGISTER_OP(fix)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>);
// square
NNVM_REGISTER_OP(square)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square>);
NNVM_REGISTER_OP(_backward_square)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_grad> >);
// sqrt
NNVM_REGISTER_OP(sqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square_root>);
NNVM_REGISTER_OP(_backward_sqrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_root_grad> >);
// rsqrt
NNVM_REGISTER_OP(rsqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_square_root>);
NNVM_REGISTER_OP(_backward_rsqrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_square_root_grad> >);
// cbrt
NNVM_REGISTER_OP(cbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::cube_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::cube_root>);
NNVM_REGISTER_OP(_backward_cbrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::cube_root_grad> >);
// rcbrt
NNVM_REGISTER_OP(rcbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_cube_root>);
NNVM_REGISTER_OP(_backward_rcbrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_cube_root_grad> >);
// exp
NNVM_REGISTER_OP(exp)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::exp>);
// log
NNVM_REGISTER_OP(log)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log>);
// log10
NNVM_REGISTER_OP(log10)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log10>);
// log2
NNVM_REGISTER_OP(log2)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log2>);
NNVM_REGISTER_OP(_backward_log)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log_grad> >);
NNVM_REGISTER_OP(_backward_log10)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log10_grad> >);
NNVM_REGISTER_OP(_backward_log2)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log2_grad> >);
// log1p
NNVM_REGISTER_OP(log1p)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log1p>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::log1p>);
NNVM_REGISTER_OP(_backward_log1p)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log1p_grad> >);
// expm1
NNVM_REGISTER_OP(expm1)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::expm1>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::expm1>);
NNVM_REGISTER_OP(_backward_expm1)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::exp> >);
// gamma
NNVM_REGISTER_OP(gamma)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>);
NNVM_REGISTER_OP(_backward_gamma)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gamma_grad> >);
// gammaln
NNVM_REGISTER_OP(gammaln)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>);
NNVM_REGISTER_OP(_backward_gammaln)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gammaln_grad> >);
// logical not
NNVM_REGISTER_OP(logical_not)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>);
} // namespace op
} // namespace mxnet
|
8ee1bddaec4d87ddbf16bc1ffe24ce0f07585efb.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_unary_op_trig.cu
 * \brief GPU Implementation of unary trigonometric functions.
*/
#include "./elemwise_binary_op.h"
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(relu)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>);
NNVM_REGISTER_OP(_backward_relu)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::relu_grad>>);
NNVM_REGISTER_OP(sigmoid)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>);
NNVM_REGISTER_OP(_backward_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sigmoid_grad>>);
NNVM_REGISTER_OP(hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>);
NNVM_REGISTER_OP(_backward_hard_sigmoid)
.set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>);
// softsign
NNVM_REGISTER_OP(softsign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>);
NNVM_REGISTER_OP(_backward_softsign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::softsign_grad>>);
// copy
NNVM_REGISTER_OP(_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
NNVM_REGISTER_OP(_backward_copy)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(BlockGrad)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
NNVM_REGISTER_OP(make_loss)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>);
// identity output as first input, but attributes are constrained to be like rhs
NNVM_REGISTER_OP(_identity_with_attr_like_rhs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>);
NNVM_REGISTER_OP(reshape_like)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>);
void ShapeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
cudaMemcpyAsync(out_data.dptr_,
in_data.shape_.data(),
in_data.ndim() * sizeof(int64_t),
cudaMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(shape_array)
.set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU);
void SizeComputeGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
const index_t size_var = in_data.Size();
cudaMemcpyAsync(out_data.dptr_,
&size_var,
1U * sizeof(int64_t),
cudaMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
}
NNVM_REGISTER_OP(size_array)
.set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU);
NNVM_REGISTER_OP(Cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
NNVM_REGISTER_OP(_backward_cast)
.set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>);
// negative
NNVM_REGISTER_OP(negative)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>);
// reciprocal
NNVM_REGISTER_OP(reciprocal)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal>);
NNVM_REGISTER_OP(_backward_reciprocal)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::reciprocal_grad> >);
// abs
NNVM_REGISTER_OP(abs)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>);
NNVM_REGISTER_OP(_backward_abs)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >);
// sign
NNVM_REGISTER_OP(sign)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>);
NNVM_REGISTER_OP(_backward_sign)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::sign_grad> >);
// round
NNVM_REGISTER_OP(round)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>);
// ceil
NNVM_REGISTER_OP(ceil)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>);
// floor
NNVM_REGISTER_OP(floor)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>);
// trunc
NNVM_REGISTER_OP(trunc)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>);
// rint
NNVM_REGISTER_OP(rint)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>);
// fix
NNVM_REGISTER_OP(fix)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>);
// square
NNVM_REGISTER_OP(square)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square>);
NNVM_REGISTER_OP(_backward_square)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_grad> >);
// sqrt
NNVM_REGISTER_OP(sqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::square_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::square_root>);
NNVM_REGISTER_OP(_backward_sqrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::square_root_grad> >);
// rsqrt
NNVM_REGISTER_OP(rsqrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_square_root>);
NNVM_REGISTER_OP(_backward_rsqrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_square_root_grad> >);
// cbrt
NNVM_REGISTER_OP(cbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::cube_root>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::cube_root>);
NNVM_REGISTER_OP(_backward_cbrt)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::cube_root_grad> >);
// rcbrt
NNVM_REGISTER_OP(rcbrt)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::reciprocal_cube_root>);
NNVM_REGISTER_OP(_backward_rcbrt)
.set_attr<FCompute>("FCompute<gpu>",
ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::reciprocal_cube_root_grad> >);
// exp
NNVM_REGISTER_OP(exp)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::exp>);
// log
NNVM_REGISTER_OP(log)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log>);
// log10
NNVM_REGISTER_OP(log10)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log10>);
// log2
NNVM_REGISTER_OP(log2)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log2>);
NNVM_REGISTER_OP(_backward_log)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log_grad> >);
NNVM_REGISTER_OP(_backward_log10)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log10_grad> >);
NNVM_REGISTER_OP(_backward_log2)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log2_grad> >);
// log1p
NNVM_REGISTER_OP(log1p)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::log1p>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::log1p>);
NNVM_REGISTER_OP(_backward_log1p)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::log1p_grad> >);
// expm1
NNVM_REGISTER_OP(expm1)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::expm1>)
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::expm1>);
NNVM_REGISTER_OP(_backward_expm1)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::exp> >);
// gamma
NNVM_REGISTER_OP(gamma)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>);
NNVM_REGISTER_OP(_backward_gamma)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gamma_grad> >);
// gammaln
NNVM_REGISTER_OP(gammaln)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>);
NNVM_REGISTER_OP(_backward_gammaln)
.set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<
gpu, unary_bwd<mshadow_op::gammaln_grad> >);
// logical not
NNVM_REGISTER_OP(logical_not)
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>);
} // namespace op
} // namespace mxnet
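// -----------------------------------------------------------------------------
// The registrations above only bind operator names to templated compute
// functions defined elsewhere in MXNet. As a rough, self-contained sketch of
// the pattern they ultimately dispatch to -- an elementwise kernel
// parameterized by a unary functor -- see below. It is disabled, uses
// hypothetical names (ToyRelu, toy_unary_map), and is not MXNet's actual
// implementation.
#if 0
struct ToyRelu {
  __device__ static float Map(float x) { return x > 0.f ? x : 0.f; }
};

template <typename OP>
__global__ void toy_unary_map(const float *in, float *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    out[i] = OP::Map(in[i]); // one thread per element; the functor supplies the math
  }
}
// launch example: toy_unary_map<ToyRelu><<<(n + 255) / 256, 256>>>(d_in, d_out, n);
#endif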
|
4928a8a70dbb3e25dd5baf703d1f9419f4051705.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define XBLOCK_SIZE 32
#define YBLOCK_SIZE 24
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY,int width,int count, int *output) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
float c_re = lowerX + i * stepX;
float c_im = lowerY + j * stepY;
int idx;
float z_re = c_re, z_im = c_im;
for (idx = 0; idx < count; ++idx)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
int index = (j * width + i);
output[index] = idx;
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
int size = resX * resY * sizeof(int);
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
// allocate memory in host & device
int *host_mem, *dev_mem;
host_mem = (int *)malloc(size);
hipMalloc((void **)&dev_mem, size);
// GPU processing
dim3 num_block(resX / XBLOCK_SIZE, resY / YBLOCK_SIZE);
dim3 block_size(XBLOCK_SIZE, YBLOCK_SIZE);
hipLaunchKernelGGL(( mandelKernel), dim3(num_block), dim3(block_size), 0, 0, lowerX, lowerY, stepX, stepY, resX, maxIterations, dev_mem);
hipDeviceSynchronize();
// GPU translate result data back
hipMemcpy(host_mem, dev_mem, size, hipMemcpyDeviceToHost);
memcpy(img, host_mem, size);
free(host_mem);
hipFree(dev_mem);
}
|
4928a8a70dbb3e25dd5baf703d1f9419f4051705.cu
|
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define XBLOCK_SIZE 32
#define YBLOCK_SIZE 24
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY,int width,int count, int *output) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
float c_re = lowerX + i * stepX;
float c_im = lowerY + j * stepY;
int idx;
float z_re = c_re, z_im = c_im;
for (idx = 0; idx < count; ++idx)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = c_re + new_re;
z_im = c_im + new_im;
}
int index = (j * width + i);
output[index] = idx;
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
int size = resX * resY * sizeof(int);
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
// allocate memory in host & device
int *host_mem, *dev_mem;
host_mem = (int *)malloc(size);
cudaMalloc((void **)&dev_mem, size);
// GPU processing
dim3 num_block(resX / XBLOCK_SIZE, resY / YBLOCK_SIZE);
dim3 block_size(XBLOCK_SIZE, YBLOCK_SIZE);
mandelKernel<<<num_block, block_size>>>(lowerX, lowerY, stepX, stepY, resX, maxIterations, dev_mem);
cudaDeviceSynchronize();
// GPU translate result data back
cudaMemcpy(host_mem, dev_mem, size, cudaMemcpyDeviceToHost);
memcpy(img, host_mem, size);
free(host_mem);
cudaFree(dev_mem);
}
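// -----------------------------------------------------------------------------
// hostFE above assumes resX is a multiple of XBLOCK_SIZE and resY a multiple of
// YBLOCK_SIZE; the truncating division otherwise drops the border pixels. A
// guarded variant (hypothetical, not part of the original code) would round the
// grid up and bounds-check inside the kernel:
#if 0
__global__ void mandelKernelGuarded(float lowerX, float lowerY, float stepX, float stepY,
                                    int width, int height, int count, int *output) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= width || j >= height) return; // guard the ragged edge
    float c_re = lowerX + i * stepX;
    float c_im = lowerY + j * stepY;
    float z_re = c_re, z_im = c_im;
    int idx;
    for (idx = 0; idx < count; ++idx) {
        if (z_re * z_re + z_im * z_im > 4.f)
            break;
        float new_re = z_re * z_re - z_im * z_im;
        float new_im = 2.f * z_re * z_im;
        z_re = c_re + new_re;
        z_im = c_im + new_im;
    }
    output[j * width + i] = idx;
}
// grid: dim3 num_block((resX + XBLOCK_SIZE - 1) / XBLOCK_SIZE,
//                      (resY + YBLOCK_SIZE - 1) / YBLOCK_SIZE);
#endif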
|
8651842e9b1032a115bb2eac29d21b876069f433.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "CudaDeviceKeys.h"
#include "CudaDeviceKeys.cuh"
#include "secp256k1.cuh"
__constant__ unsigned int *_xPtr[1];
__constant__ unsigned int *_yPtr[1];
__device__ unsigned int *ec::getXPtr()
{
return _xPtr[0];
}
__device__ unsigned int *ec::getYPtr()
{
return _yPtr[0];
}
__global__ void multiplyStepKernel(unsigned int *privateKeys, int pointsPerThread, int step, unsigned int *chain, const unsigned int *gxPtr, const unsigned int *gyPtr);
int CudaDeviceKeys::getIndex(int block, int thread, int idx)
{
// Total number of threads
int totalThreads = _blocks * _threads;
int base = idx * totalThreads;
// Global ID of the current thread
int threadId = block * _threads + thread;
return base + threadId;
}
void CudaDeviceKeys::splatBigInt(unsigned int *dest, int block, int thread, int idx, const secp256k1::uint256 &i)
{
unsigned int value[8] = {0};
i.exportWords(value, 8, secp256k1::uint256::BigEndian);
int totalThreads = _blocks * _threads;
int threadId = block * _threads * 4 + thread * 4;
int base = idx * _blocks * _threads * 8;
int index = base + threadId;
for (int k = 0; k < 4; k++) {
dest[index] = value[k];
index++;
}
index = base + totalThreads * 4 + threadId;
for (int k = 4; k < 8; k++) {
dest[index] = value[k];
index++;
}
}
secp256k1::uint256 CudaDeviceKeys::readBigInt(unsigned int *src, int block, int thread, int idx)
{
unsigned int value[8] = {0};
int totalThreads = _blocks * _threads;
int threadId = block * _threads * 4 + thread * 4;
int base = idx * _blocks * _threads * 8;
int index = base + threadId;
for (int k = 0; k < 4; k++) {
value[k] = src[index];
index++;
}
index = base + totalThreads * 4 + threadId;
for (int k = 4; k < 8; k++) {
value[k] = src[index];
index++;
}
secp256k1::uint256 v(value, secp256k1::uint256::BigEndian);
return v;
}
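// Layout note (added for clarity; the concrete numbers are only an example):
// splatBigInt()/readBigInt() store each 256-bit key as two 4-word halves with a
// stride of totalThreads*4 between them, so consecutive threads access
// consecutive 4-word groups. For instance, with _blocks = 2, _threads = 4
// (totalThreads = 8), block = 1, thread = 2, idx = 0: threadId = 1*4*4 + 2*4 = 24
// and base = 0, so words 0..3 land at indices 24..27 and words 4..7 at
// 8*4 + 24 = 56..59.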
/**
* Allocates device memory for storing the multiplication chain used in
* the batch inversion operation
*/
hipError_t CudaDeviceKeys::allocateChainBuf(unsigned int count)
{
hipError_t err = hipMalloc(&_devChain, count * sizeof(unsigned int) * 8);
if (err) {
return err;
}
return err;
}
hipError_t CudaDeviceKeys::initializeBasePoints()
{
// generate a table of points G, 2G, 4G, 8G...(2^255)G
std::vector <secp256k1::ecpoint> table;
table.push_back(secp256k1::G());
for (int i = 1; i < 256; i++) {
secp256k1::ecpoint p = doublePoint(table[i - 1]);
if (!pointExists(p)) {
throw std::string("Point does not exist!");
}
table.push_back(p);
}
unsigned int count = 256;
hipError_t err = hipMalloc(&_devBasePointX, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
err = hipMalloc(&_devBasePointY, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
unsigned int *tmpX = new unsigned int[count * 8];
unsigned int *tmpY = new unsigned int[count * 8];
for (int i = 0; i < 256; i++) {
unsigned int bufX[8];
unsigned int bufY[8];
table[i].x.exportWords(bufX, 8, secp256k1::uint256::BigEndian);
table[i].y.exportWords(bufY, 8, secp256k1::uint256::BigEndian);
for (int j = 0; j < 8; j++) {
tmpX[i * 8 + j] = bufX[j];
tmpY[i * 8 + j] = bufY[j];
}
}
err = hipMemcpy(_devBasePointX, tmpX, count * 8 * sizeof(unsigned int), hipMemcpyHostToDevice);
delete[] tmpX;
if (err) {
delete[] tmpY;
return err;
}
err = hipMemcpy(_devBasePointY, tmpY, count * 8 * sizeof(unsigned int), hipMemcpyHostToDevice);
delete[] tmpY;
return err;
}
hipError_t CudaDeviceKeys::initializePublicKeys(size_t count)
{
// Allocate X array
hipError_t err = hipMalloc(&_devX, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
// Clear X array
err = hipMemset(_devX, -1, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
// Allocate Y array
err = hipMalloc(&_devY, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
// Clear Y array
err = hipMemset(_devY, -1, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
err = hipMemcpyToSymbol(_xPtr, &_devX, sizeof(unsigned int *));
if (err) {
return err;
}
err = hipMemcpyToSymbol(_yPtr, &_devY, sizeof(unsigned int *));
return err;
}
hipError_t CudaDeviceKeys::init(int blocks, int threads, int pointsPerThread, const std::vector <secp256k1::uint256> &privateKeys)
{
_blocks = blocks;
_threads = threads;
_pointsPerThread = pointsPerThread;
size_t count = privateKeys.size();
// Allocate space for public keys on device
hipError_t err = initializePublicKeys(count);
if (err) {
return err;
}
err = initializeBasePoints();
if (err) {
return err;
}
// Allocate private keys on device
err = hipMalloc(&_devPrivate, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
// Clear private keys
err = hipMemset(_devPrivate, 0, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
err = allocateChainBuf(_threads * _blocks * _pointsPerThread);
if (err) {
return err;
}
// Copy private keys to system memory buffer
unsigned int *tmp = new unsigned int[count * 8];
for (int block = 0; block < _blocks; block++) {
for (int thread = 0; thread < _threads; thread++) {
for (int idx = 0; idx < _pointsPerThread; idx++) {
int index = getIndex(block, thread, idx);
splatBigInt(tmp, block, thread, idx, privateKeys[index]);
}
}
}
// Copy private keys to device memory
err = hipMemcpy(_devPrivate, tmp, count * sizeof(unsigned int) * 8, hipMemcpyHostToDevice);
delete[] tmp;
if (err) {
return err;
}
return hipSuccess;
}
void CudaDeviceKeys::clearPublicKeys()
{
hipFree(_devX);
hipFree(_devY);
_devX = NULL;
_devY = NULL;
}
void CudaDeviceKeys::clearPrivateKeys()
{
hipFree(_devBasePointX);
hipFree(_devBasePointY);
hipFree(_devPrivate);
hipFree(_devChain);
_devChain = NULL;
_devBasePointX = NULL;
_devBasePointY = NULL;
_devPrivate = NULL;
}
hipError_t CudaDeviceKeys::doStep()
{
hipLaunchKernelGGL(( multiplyStepKernel) , dim3(_blocks), dim3(_threads), 0, 0, _devPrivate, _pointsPerThread, _step, _devChain, _devBasePointX, _devBasePointY);
// Wait for kernel to complete
hipError_t err = hipDeviceSynchronize();
fflush(stdout);
_step++;
return err;
}
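// multiplyStepKernel performs one step of a double-and-add point multiplication:
// for every key whose bit number `step` is set, it adds the precomputed point
// 2^step * G (from the base-point table) to that key's running public key,
// using a batched point addition.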
__global__ void multiplyStepKernel(unsigned int *privateKeys, int pointsPerThread, int step, unsigned int *chain, const unsigned int *gxPtr, const unsigned int *gyPtr)
{
unsigned int *xPtr = ec::getXPtr();
unsigned int *yPtr = ec::getYPtr();
unsigned int gx[8];
unsigned int gy[8];
for (int i = 0; i < 8; i++) {
gx[i] = gxPtr[step * 8 + i];
gy[i] = gyPtr[step * 8 + i];
}
// Multiply together all (_Gx - x) and then invert
unsigned int inverse[8] = {0, 0, 0, 0, 0, 0, 0, 1};
int batchIdx = 0;
for (int i = 0; i < pointsPerThread; i++) {
unsigned int p[8];
readInt(privateKeys, i, p);
unsigned int bit = p[7 - step / 32] & 1 << ((step % 32));
unsigned int x[8];
readInt(xPtr, i, x);
if (bit != 0) {
if (!isInfinity(x)) {
beginBatchAddWithDouble(gx, gy, xPtr, chain, i, batchIdx, inverse);
batchIdx++;
}
}
}
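// doBatchInverse presumably implements Montgomery's trick: the chained products
// accumulated above are inverted with a single modular inversion, and the reverse
// pass below unwinds the chain so each point addition recovers its own inverse.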
doBatchInverse(inverse);
for (int i = pointsPerThread - 1; i >= 0; i--) {
unsigned int newX[8];
unsigned int newY[8];
unsigned int p[8];
readInt(privateKeys, i, p);
unsigned int bit = p[7 - step / 32] & 1 << ((step % 32));
unsigned int x[8];
readInt(xPtr, i, x);
bool infinity = isInfinity(x);
if (bit != 0) {
if (!infinity) {
batchIdx--;
completeBatchAddWithDouble(gx, gy, xPtr, yPtr, i, batchIdx, chain, inverse, newX, newY);
} else {
copyBigInt(gx, newX);
copyBigInt(gy, newY);
}
writeInt(xPtr, i, newX);
writeInt(yPtr, i, newY);
}
}
}
bool CudaDeviceKeys::selfTest(const std::vector <secp256k1::uint256> &privateKeys)
{
unsigned int numPoints = _threads * _blocks * _pointsPerThread;
unsigned int *xBuf = new unsigned int[numPoints * 8];
unsigned int *yBuf = new unsigned int[numPoints * 8];
hipError_t err = hipMemcpy(xBuf, _devX, sizeof(unsigned int) * 8 * numPoints, hipMemcpyDeviceToHost);
err = hipMemcpy(yBuf, _devY, sizeof(unsigned int) * 8 * numPoints, hipMemcpyDeviceToHost);
for (int block = 0; block < _blocks; block++) {
for (int thread = 0; thread < _threads; thread++) {
for (int idx = 0; idx < _pointsPerThread; idx++) {
int index = getIndex(block, thread, idx);
secp256k1::uint256 privateKey = privateKeys[index];
secp256k1::uint256 x = readBigInt(xBuf, block, thread, idx);
secp256k1::uint256 y = readBigInt(yBuf, block, thread, idx);
secp256k1::ecpoint p1(x, y);
secp256k1::ecpoint p2 = secp256k1::multiplyPoint(privateKey, secp256k1::G());
if (!secp256k1::pointExists(p1)) {
throw std::string("Validation failed: invalid point");
}
if (!secp256k1::pointExists(p2)) {
throw std::string("Validation failed: invalid point");
}
if (!(p1 == p2)) {
throw std::string("Validation failed: points do not match");
}
}
}
}
delete[] xBuf;
delete[] yBuf;
return true;
}
|
8651842e9b1032a115bb2eac29d21b876069f433.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "CudaDeviceKeys.h"
#include "CudaDeviceKeys.cuh"
#include "secp256k1.cuh"
__constant__ unsigned int *_xPtr[1];
__constant__ unsigned int *_yPtr[1];
__device__ unsigned int *ec::getXPtr()
{
return _xPtr[0];
}
__device__ unsigned int *ec::getYPtr()
{
return _yPtr[0];
}
__global__ void multiplyStepKernel(unsigned int *privateKeys, int pointsPerThread, int step, unsigned int *chain, const unsigned int *gxPtr, const unsigned int *gyPtr);
int CudaDeviceKeys::getIndex(int block, int thread, int idx)
{
// Total number of threads
int totalThreads = _blocks * _threads;
int base = idx * totalThreads;
// Global ID of the current thread
int threadId = block * _threads + thread;
return base + threadId;
}
void CudaDeviceKeys::splatBigInt(unsigned int *dest, int block, int thread, int idx, const secp256k1::uint256 &i)
{
unsigned int value[8] = {0};
i.exportWords(value, 8, secp256k1::uint256::BigEndian);
int totalThreads = _blocks * _threads;
int threadId = block * _threads * 4 + thread * 4;
int base = idx * _blocks * _threads * 8;
int index = base + threadId;
for (int k = 0; k < 4; k++) {
dest[index] = value[k];
index++;
}
index = base + totalThreads * 4 + threadId;
for (int k = 4; k < 8; k++) {
dest[index] = value[k];
index++;
}
}
secp256k1::uint256 CudaDeviceKeys::readBigInt(unsigned int *src, int block, int thread, int idx)
{
unsigned int value[8] = {0};
int totalThreads = _blocks * _threads;
int threadId = block * _threads * 4 + thread * 4;
int base = idx * _blocks * _threads * 8;
int index = base + threadId;
for (int k = 0; k < 4; k++) {
value[k] = src[index];
index++;
}
index = base + totalThreads * 4 + threadId;
for (int k = 4; k < 8; k++) {
value[k] = src[index];
index++;
}
secp256k1::uint256 v(value, secp256k1::uint256::BigEndian);
return v;
}
/**
* Allocates device memory for storing the multiplication chain used in
* the batch inversion operation
*/
cudaError_t CudaDeviceKeys::allocateChainBuf(unsigned int count)
{
cudaError_t err = cudaMalloc(&_devChain, count * sizeof(unsigned int) * 8);
if (err) {
return err;
}
return err;
}
cudaError_t CudaDeviceKeys::initializeBasePoints()
{
// generate a table of points G, 2G, 4G, 8G...(2^255)G
std::vector <secp256k1::ecpoint> table;
table.push_back(secp256k1::G());
for (int i = 1; i < 256; i++) {
secp256k1::ecpoint p = doublePoint(table[i - 1]);
if (!pointExists(p)) {
throw std::string("Point does not exist!");
}
table.push_back(p);
}
unsigned int count = 256;
cudaError_t err = cudaMalloc(&_devBasePointX, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
err = cudaMalloc(&_devBasePointY, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
unsigned int *tmpX = new unsigned int[count * 8];
unsigned int *tmpY = new unsigned int[count * 8];
for (int i = 0; i < 256; i++) {
unsigned int bufX[8];
unsigned int bufY[8];
table[i].x.exportWords(bufX, 8, secp256k1::uint256::BigEndian);
table[i].y.exportWords(bufY, 8, secp256k1::uint256::BigEndian);
for (int j = 0; j < 8; j++) {
tmpX[i * 8 + j] = bufX[j];
tmpY[i * 8 + j] = bufY[j];
}
}
err = cudaMemcpy(_devBasePointX, tmpX, count * 8 * sizeof(unsigned int), cudaMemcpyHostToDevice);
delete[] tmpX;
if (err) {
delete[] tmpY;
return err;
}
err = cudaMemcpy(_devBasePointY, tmpY, count * 8 * sizeof(unsigned int), cudaMemcpyHostToDevice);
delete[] tmpY;
return err;
}
cudaError_t CudaDeviceKeys::initializePublicKeys(size_t count)
{
// Allocate X array
cudaError_t err = cudaMalloc(&_devX, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
// Clear X array
err = cudaMemset(_devX, -1, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
// Allocate Y array
err = cudaMalloc(&_devY, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
// Clear Y array
err = cudaMemset(_devY, -1, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
err = cudaMemcpyToSymbol(_xPtr, &_devX, sizeof(unsigned int *));
if (err) {
return err;
}
err = cudaMemcpyToSymbol(_yPtr, &_devY, sizeof(unsigned int *));
return err;
}
cudaError_t CudaDeviceKeys::init(int blocks, int threads, int pointsPerThread, const std::vector <secp256k1::uint256> &privateKeys)
{
_blocks = blocks;
_threads = threads;
_pointsPerThread = pointsPerThread;
size_t count = privateKeys.size();
// Allocate space for public keys on device
cudaError_t err = initializePublicKeys(count);
if (err) {
return err;
}
err = initializeBasePoints();
if (err) {
return err;
}
// Allocate private keys on device
err = cudaMalloc(&_devPrivate, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
// Clear private keys
err = cudaMemset(_devPrivate, 0, sizeof(unsigned int) * count * 8);
if (err) {
return err;
}
err = allocateChainBuf(_threads * _blocks * _pointsPerThread);
if (err) {
return err;
}
// Copy private keys to system memory buffer
unsigned int *tmp = new unsigned int[count * 8];
for (int block = 0; block < _blocks; block++) {
for (int thread = 0; thread < _threads; thread++) {
for (int idx = 0; idx < _pointsPerThread; idx++) {
int index = getIndex(block, thread, idx);
splatBigInt(tmp, block, thread, idx, privateKeys[index]);
}
}
}
// Copy private keys to device memory
err = cudaMemcpy(_devPrivate, tmp, count * sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
delete[] tmp;
if (err) {
return err;
}
return cudaSuccess;
}
void CudaDeviceKeys::clearPublicKeys()
{
cudaFree(_devX);
cudaFree(_devY);
_devX = NULL;
_devY = NULL;
}
void CudaDeviceKeys::clearPrivateKeys()
{
cudaFree(_devBasePointX);
cudaFree(_devBasePointY);
cudaFree(_devPrivate);
cudaFree(_devChain);
_devChain = NULL;
_devBasePointX = NULL;
_devBasePointY = NULL;
_devPrivate = NULL;
}
cudaError_t CudaDeviceKeys::doStep()
{
multiplyStepKernel <<<_blocks, _threads>>>(_devPrivate, _pointsPerThread, _step, _devChain, _devBasePointX, _devBasePointY);
// Wait for kernel to complete
cudaError_t err = cudaDeviceSynchronize();
fflush(stdout);
_step++;
return err;
}
__global__ void multiplyStepKernel(unsigned int *privateKeys, int pointsPerThread, int step, unsigned int *chain, const unsigned int *gxPtr, const unsigned int *gyPtr)
{
unsigned int *xPtr = ec::getXPtr();
unsigned int *yPtr = ec::getYPtr();
unsigned int gx[8];
unsigned int gy[8];
for (int i = 0; i < 8; i++) {
gx[i] = gxPtr[step * 8 + i];
gy[i] = gyPtr[step * 8 + i];
}
// Multiply together all (_Gx - x) and then invert
unsigned int inverse[8] = {0, 0, 0, 0, 0, 0, 0, 1};
int batchIdx = 0;
for (int i = 0; i < pointsPerThread; i++) {
unsigned int p[8];
readInt(privateKeys, i, p);
unsigned int bit = p[7 - step / 32] & 1 << ((step % 32));
unsigned int x[8];
readInt(xPtr, i, x);
if (bit != 0) {
if (!isInfinity(x)) {
beginBatchAddWithDouble(gx, gy, xPtr, chain, i, batchIdx, inverse);
batchIdx++;
}
}
}
doBatchInverse(inverse);
for (int i = pointsPerThread - 1; i >= 0; i--) {
unsigned int newX[8];
unsigned int newY[8];
unsigned int p[8];
readInt(privateKeys, i, p);
unsigned int bit = p[7 - step / 32] & 1 << ((step % 32));
unsigned int x[8];
readInt(xPtr, i, x);
bool infinity = isInfinity(x);
if (bit != 0) {
if (!infinity) {
batchIdx--;
completeBatchAddWithDouble(gx, gy, xPtr, yPtr, i, batchIdx, chain, inverse, newX, newY);
} else {
copyBigInt(gx, newX);
copyBigInt(gy, newY);
}
writeInt(xPtr, i, newX);
writeInt(yPtr, i, newY);
}
}
}
bool CudaDeviceKeys::selfTest(const std::vector <secp256k1::uint256> &privateKeys)
{
unsigned int numPoints = _threads * _blocks * _pointsPerThread;
unsigned int *xBuf = new unsigned int[numPoints * 8];
unsigned int *yBuf = new unsigned int[numPoints * 8];
cudaError_t err = cudaMemcpy(xBuf, _devX, sizeof(unsigned int) * 8 * numPoints, cudaMemcpyDeviceToHost);
err = cudaMemcpy(yBuf, _devY, sizeof(unsigned int) * 8 * numPoints, cudaMemcpyDeviceToHost);
for (int block = 0; block < _blocks; block++) {
for (int thread = 0; thread < _threads; thread++) {
for (int idx = 0; idx < _pointsPerThread; idx++) {
int index = getIndex(block, thread, idx);
secp256k1::uint256 privateKey = privateKeys[index];
secp256k1::uint256 x = readBigInt(xBuf, block, thread, idx);
secp256k1::uint256 y = readBigInt(yBuf, block, thread, idx);
secp256k1::ecpoint p1(x, y);
secp256k1::ecpoint p2 = secp256k1::multiplyPoint(privateKey, secp256k1::G());
if (!secp256k1::pointExists(p1)) {
throw std::string("Validation failed: invalid point");
}
if (!secp256k1::pointExists(p2)) {
throw std::string("Validation failed: invalid point");
}
if (!(p1 == p2)) {
throw std::string("Validation failed: points do not match");
}
}
}
}
delete[] xBuf;
delete[] yBuf;
return true;
}
|
e8f6fd7ad45f2d4c4bd87e45583c7c2aa43703a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/slice_util.h"
#include "oneflow/core/common/switch_func.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
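// The kernels below iterate over the sliced tensor's elements (CUDA_1D_KERNEL_LOOP
// is effectively a grid-stride loop); SliceOffsetToEntireOffset maps each sliced
// index back to its offset in the full tensor, so the forward pass is a gather and
// the backward pass is a scatter.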
template<typename T, int NDIM>
__global__ void SliceForwardGpu(const int n, SliceParams params,
SliceIndexHelper<NDIM> entire_idx_cvtr,
SliceIndexHelper<NDIM> sliced_idx_cvtr, const T* entire,
T* sliced) {
CUDA_1D_KERNEL_LOOP(i, n) {
int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr);
sliced[i] = entire[offset];
}
}
template<typename T, int NDIM>
__global__ void SliceBackwardGpu(const int n, SliceParams params,
SliceIndexHelper<NDIM> entire_idx_cvtr,
SliceIndexHelper<NDIM> sliced_idx_cvtr, T* entire,
const T* sliced) {
CUDA_1D_KERNEL_LOOP(i, n) {
int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr);
entire[offset] = sliced[i];
}
}
template<typename T, int NDIM>
void LaunchSliceForward(ep::Stream* stream, const SliceParams& params, const T* entire, T* sliced) {
CHECK_EQ(params.ndim, NDIM);
int64_t elem_cnt = params.elem_cnt();
SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims);
SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size);
if (elem_cnt == 0) { return; }
hipLaunchKernelGGL(( SliceForwardGpu<T, NDIM>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced);
}
template<typename T, int NDIM>
void LaunchSliceBackward(ep::Stream* stream, const SliceParams& params, const T* sliced,
T* entire) {
CHECK_EQ(params.ndim, NDIM);
int64_t elem_cnt = params.elem_cnt();
SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims);
SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size);
if (elem_cnt == 0) { return; }
hipLaunchKernelGGL(( SliceBackwardGpu<T, NDIM>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced);
}
template<typename T>
struct SliceSwitchUtil final {
#define MAKE_SLICE_SWITCH_ENTRY(func_name, N) func_name<T, N>
#define DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(func_name) \
DEFINE_STATIC_SWITCH_FUNC(void, func_name, MAKE_SLICE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ))
DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceForward)
DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceBackward)
#undef DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD
#undef MAKE_SLICE_SWITCH_ENTRY
};
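// GetPackSize below picks the widest vector load/store width (16/8/4/2/1 bytes)
// that divides the last-dimension extent, start offset and size as well as both
// pointer addresses: OR-ing all of them and testing the low bits yields their
// common alignment.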
template<typename T>
size_t GetPackSize(const SliceParams& params, const T* entire, const T* sliced) {
CHECK_GT(params.ndim, 0);
const int64_t last_dim = params.ndim - 1;
const int64_t mask = (params.dims[last_dim] * sizeof(T)) | (params.start[last_dim] * sizeof(T))
| (params.size[last_dim] * sizeof(T))
| static_cast<int64_t>(reinterpret_cast<uintptr_t>(entire))
| static_cast<int64_t>(reinterpret_cast<uintptr_t>(sliced));
if ((mask & 0xF) == 0) {
return 16;
} else if ((mask & 0x7) == 0) {
return 8;
} else if ((mask & 0x3) == 0) {
return 4;
} else if ((mask & 0x1) == 0) {
return 2;
} else {
return 1;
}
}
template<typename T>
void GetPackedParams(const SliceParams& params, const T* entire, const T* sliced, size_t* pack_size,
SliceParams* packed_params) {
CHECK_GT(params.ndim, 0);
const int64_t last_dim = params.ndim - 1;
if (params.step[last_dim] == 1) {
*pack_size = GetPackSize<T>(params, entire, sliced);
CHECK_GE(*pack_size, sizeof(T));
const int64_t elem_per_pack = *pack_size / sizeof(T);
*packed_params = params;
packed_params->dims[last_dim] /= elem_per_pack;
packed_params->start[last_dim] /= elem_per_pack;
packed_params->size[last_dim] /= elem_per_pack;
} else {
*pack_size = sizeof(T);
*packed_params = params;
}
}
} // namespace
template<typename T>
struct SliceKernelUtil<DeviceType::kGPU, T> {
static void Forward(ep::Stream* stream, const SliceParams& params, const T* entire, T* sliced) {
SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params);
size_t pack_size;
SliceParams packed_params{};
GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params);
if (pack_size == 1) {
SliceSwitchUtil<uint8_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint8_t*>(entire), reinterpret_cast<uint8_t*>(sliced));
} else if (pack_size == 2) {
SliceSwitchUtil<uint16_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint16_t*>(entire), reinterpret_cast<uint16_t*>(sliced));
} else if (pack_size == 4) {
SliceSwitchUtil<uint32_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint32_t*>(entire), reinterpret_cast<uint32_t*>(sliced));
} else if (pack_size == 8) {
SliceSwitchUtil<uint64_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint64_t*>(entire), reinterpret_cast<uint64_t*>(sliced));
} else if (pack_size == 16) {
SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const ulonglong2*>(entire), reinterpret_cast<ulonglong2*>(sliced));
} else {
UNIMPLEMENTED();
}
}
static void Backward(ep::Stream* stream, const SliceParams& params, const T* sliced, T* entire) {
SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params);
size_t pack_size;
SliceParams packed_params{};
GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params);
if (pack_size == 1) {
SliceSwitchUtil<uint8_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint8_t*>(sliced), reinterpret_cast<uint8_t*>(entire));
} else if (pack_size == 2) {
SliceSwitchUtil<uint16_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint16_t*>(sliced), reinterpret_cast<uint16_t*>(entire));
} else if (pack_size == 4) {
SliceSwitchUtil<uint32_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint32_t*>(sliced), reinterpret_cast<uint32_t*>(entire));
} else if (pack_size == 8) {
SliceSwitchUtil<uint64_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint64_t*>(sliced), reinterpret_cast<uint64_t*>(entire));
} else if (pack_size == 16) {
SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const ulonglong2*>(sliced), reinterpret_cast<ulonglong2*>(entire));
} else {
UNIMPLEMENTED();
}
}
};
INSTANTIATE_SLICE_KERNEL_UTIL_WITH_DEVICE(DeviceType::kGPU)
INSTANTIATE_SLICE_KERNEL_UTIL(DeviceType::kGPU, float16)
} // namespace oneflow
|
e8f6fd7ad45f2d4c4bd87e45583c7c2aa43703a1.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/slice_util.h"
#include "oneflow/core/common/switch_func.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T, int NDIM>
__global__ void SliceForwardGpu(const int n, SliceParams params,
SliceIndexHelper<NDIM> entire_idx_cvtr,
SliceIndexHelper<NDIM> sliced_idx_cvtr, const T* entire,
T* sliced) {
CUDA_1D_KERNEL_LOOP(i, n) {
int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr);
sliced[i] = entire[offset];
}
}
template<typename T, int NDIM>
__global__ void SliceBackwardGpu(const int n, SliceParams params,
SliceIndexHelper<NDIM> entire_idx_cvtr,
SliceIndexHelper<NDIM> sliced_idx_cvtr, T* entire,
const T* sliced) {
CUDA_1D_KERNEL_LOOP(i, n) {
int64_t offset = SliceOffsetToEntireOffset<NDIM>(i, params, entire_idx_cvtr, sliced_idx_cvtr);
entire[offset] = sliced[i];
}
}
template<typename T, int NDIM>
void LaunchSliceForward(ep::Stream* stream, const SliceParams& params, const T* entire, T* sliced) {
CHECK_EQ(params.ndim, NDIM);
int64_t elem_cnt = params.elem_cnt();
SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims);
SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size);
if (elem_cnt == 0) { return; }
SliceForwardGpu<T, NDIM><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced);
}
template<typename T, int NDIM>
void LaunchSliceBackward(ep::Stream* stream, const SliceParams& params, const T* sliced,
T* entire) {
CHECK_EQ(params.ndim, NDIM);
int64_t elem_cnt = params.elem_cnt();
SliceIndexHelper<NDIM> entire_idx_cvtr(params.dims);
SliceIndexHelper<NDIM> sliced_idx_cvtr(params.size);
if (elem_cnt == 0) { return; }
SliceBackwardGpu<T, NDIM><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
elem_cnt, params, entire_idx_cvtr, sliced_idx_cvtr, entire, sliced);
}
template<typename T>
struct SliceSwitchUtil final {
#define MAKE_SLICE_SWITCH_ENTRY(func_name, N) func_name<T, N>
#define DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(func_name) \
DEFINE_STATIC_SWITCH_FUNC(void, func_name, MAKE_SLICE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ))
DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceForward)
DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD(LaunchSliceBackward)
#undef DEFINE_SLICE_SWITCH_UTIL_STATIC_METHOD
#undef MAKE_SLICE_SWITCH_ENTRY
};
template<typename T>
size_t GetPackSize(const SliceParams& params, const T* entire, const T* sliced) {
CHECK_GT(params.ndim, 0);
const int64_t last_dim = params.ndim - 1;
const int64_t mask = (params.dims[last_dim] * sizeof(T)) | (params.start[last_dim] * sizeof(T))
| (params.size[last_dim] * sizeof(T))
| static_cast<int64_t>(reinterpret_cast<uintptr_t>(entire))
| static_cast<int64_t>(reinterpret_cast<uintptr_t>(sliced));
if ((mask & 0xF) == 0) {
return 16;
} else if ((mask & 0x7) == 0) {
return 8;
} else if ((mask & 0x3) == 0) {
return 4;
} else if ((mask & 0x1) == 0) {
return 2;
} else {
return 1;
}
}
template<typename T>
void GetPackedParams(const SliceParams& params, const T* entire, const T* sliced, size_t* pack_size,
SliceParams* packed_params) {
CHECK_GT(params.ndim, 0);
const int64_t last_dim = params.ndim - 1;
if (params.step[last_dim] == 1) {
*pack_size = GetPackSize<T>(params, entire, sliced);
CHECK_GE(*pack_size, sizeof(T));
const int64_t elem_per_pack = *pack_size / sizeof(T);
*packed_params = params;
packed_params->dims[last_dim] /= elem_per_pack;
packed_params->start[last_dim] /= elem_per_pack;
packed_params->size[last_dim] /= elem_per_pack;
} else {
*pack_size = sizeof(T);
*packed_params = params;
}
}
} // namespace
template<typename T>
struct SliceKernelUtil<DeviceType::kGPU, T> {
static void Forward(ep::Stream* stream, const SliceParams& params, const T* entire, T* sliced) {
SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params);
size_t pack_size;
SliceParams packed_params{};
GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params);
if (pack_size == 1) {
SliceSwitchUtil<uint8_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint8_t*>(entire), reinterpret_cast<uint8_t*>(sliced));
} else if (pack_size == 2) {
SliceSwitchUtil<uint16_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint16_t*>(entire), reinterpret_cast<uint16_t*>(sliced));
} else if (pack_size == 4) {
SliceSwitchUtil<uint32_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint32_t*>(entire), reinterpret_cast<uint32_t*>(sliced));
} else if (pack_size == 8) {
SliceSwitchUtil<uint64_t>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint64_t*>(entire), reinterpret_cast<uint64_t*>(sliced));
} else if (pack_size == 16) {
SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceForward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const ulonglong2*>(entire), reinterpret_cast<ulonglong2*>(sliced));
} else {
UNIMPLEMENTED();
}
}
static void Backward(ep::Stream* stream, const SliceParams& params, const T* sliced, T* entire) {
SliceParams fold_slice_params = FoldContiguousFullSliceDimensions(params);
size_t pack_size;
SliceParams packed_params{};
GetPackedParams<T>(fold_slice_params, entire, sliced, &pack_size, &packed_params);
if (pack_size == 1) {
SliceSwitchUtil<uint8_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint8_t*>(sliced), reinterpret_cast<uint8_t*>(entire));
} else if (pack_size == 2) {
SliceSwitchUtil<uint16_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint16_t*>(sliced), reinterpret_cast<uint16_t*>(entire));
} else if (pack_size == 4) {
SliceSwitchUtil<uint32_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint32_t*>(sliced), reinterpret_cast<uint32_t*>(entire));
} else if (pack_size == 8) {
SliceSwitchUtil<uint64_t>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const uint64_t*>(sliced), reinterpret_cast<uint64_t*>(entire));
} else if (pack_size == 16) {
SliceSwitchUtil<ulonglong2>::SwitchLaunchSliceBackward(
SwitchCase(packed_params.ndim), stream, packed_params,
reinterpret_cast<const ulonglong2*>(sliced), reinterpret_cast<ulonglong2*>(entire));
} else {
UNIMPLEMENTED();
}
}
};
INSTANTIATE_SLICE_KERNEL_UTIL_WITH_DEVICE(DeviceType::kGPU)
INSTANTIATE_SLICE_KERNEL_UTIL(DeviceType::kGPU, float16)
} // namespace oneflow
|
7fd21c38d8879978032691afa12445b45d2be89b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/reverse.h>
#include <thrust/reduce.h>
#include <thrust/merge.h>
#include <thrust/fill.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
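// __dsmult computes C += A * B for a dense A (nrows x k) and a sparse B given as
// coordinate-style arrays (Bdata values, Bir row indices, Bic column indices,
// assumed sorted by column); each block handles a contiguous slice of nonzeros and
// flushes its partial sums with atomicAdd at column boundaries.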
__global__ void __dsmult(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[i + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[i + nrows * Bic[j]], sum);
sum = 0;
}
}
}
}
__global__ void __dsmultx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum);
sum = 0;
}
}
}
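// Host wrapper: for narrow A (nrows < 128) it uses a 2-D thread block so several
// slices of nonzeros are processed per block (__dsmultx); otherwise each block
// takes one slice of nonzeros with up to 1024 threads striding over the rows.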
int dsmult(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __dsmultTile(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nr; i += blockDim.x) {
float sum = 0;
int bcold = -1;
int jdone = 0;
for (int j = jstart; j < jend; j++) {
int brow = Bir[j] - broff;
int bcol = Bic[j] - bcoff;
if (brow >= 0 && brow < kk && bcol >= 0 && bcol < nc) {
if (jdone > 0 && bcol != bcold) {
atomicAdd(&C[i + ldc * bcold], sum);
sum = 0;
}
jdone++;
sum += A[i + lda * brow] * Bdata[j];
bcold = bcol;
}
}
if (jdone > 0) atomicAdd(&C[i + ldc * bcold], sum);
}
}
__global__ void __dsmultTilex(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float sum = 0;
int bcold = -1;
int jdone = 0;
if (threadIdx.x < nr) {
for (int j = jstart; j < jend ; j++) {
int brow = Bir[j] - broff;
int bcol = Bic[j] - bcoff;
if (brow >= 0 && brow < kk && bcol >= 0 && bcol < nc) {
if (jdone > 0 && bcol != bcold) {
atomicAdd(&C[threadIdx.x + ldc * bcold], sum);
sum = 0;
}
jdone++;
sum += A[threadIdx.x + lda * brow] * Bdata[j];
bcold = bcol;
}
}
if (jdone > 0) atomicAdd(&C[threadIdx.x + ldc * bcold], sum);
}
}
__global__ void __dsmultTileT(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nr; i += blockDim.x) {
float aval = 0;
int bcold = -1;
for (int j = jstart; j < jend; j++) {
int brow = Bir[j] - broff;
int bcol = Bic[j] - bcoff;
if (brow >= 0 && brow < nc && bcol >= 0 && bcol < nr) {
if (bcol != bcold) {
aval = A[i + lda * bcol];
}
atomicAdd(&C[i + ldc * brow], aval * Bdata[j]);
bcold = bcol;
}
}
}
}
__global__ void __dsmultTileTx(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float aval = 0;
int bcold = -1;
if (threadIdx.x < nr) {
for (int j = jstart; j < jend ; j++) {
int brow = Bir[j] - broff;
int bcol = Bic[j] - bcoff;
if (brow >= 0 && brow < nc && bcol >= 0 && bcol < nr) {
if (bcol != bcold) {
aval = A[threadIdx.x + lda * bcol];
}
atomicAdd(&C[threadIdx.x + ldc * brow], aval * Bdata[j]);
bcold = bcol;
}
}
}
}
int dsmultTile(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
if (nr < 128) {
int nt = max(1, min(nc/2, 256/nr));
dim3 threadDim(nr, nt, 1);
int nblocks = min(MAXXGRID, max(1, nc/nt));
hipLaunchKernelGGL(( __dsmultTilex), dim3(nblocks),dim3(threadDim), 0, 0, nr, nc, kk, nnz, A, lda, Bdata, Bir, Bic, broff, bcoff, C, ldc);
} else {
int nthreads = min(1024, nr);
int nblocks = min(MAXXGRID, nc);
hipLaunchKernelGGL(( __dsmultTile), dim3(nblocks),dim3(nthreads), 0, 0, nr, nc, kk, nnz, A, lda, Bdata, Bir, Bic, broff, bcoff, C, ldc);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dsmultTileT(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
if (nr < 128) {
int nt = max(1, min(nc/2, 256/nr));
dim3 threadDim(nr, nt, 1);
int nblocks = min(MAXXGRID, max(1, nc/nt));
hipLaunchKernelGGL(( __dsmultTileTx), dim3(nblocks),dim3(threadDim), 0, 0, nr, nc, kk, nnz, A, lda, Bdata, Bir, Bic, broff, bcoff, C, ldc);
} else {
int nthreads = min(1024, nr);
int nblocks = min(MAXXGRID, nc);
hipLaunchKernelGGL(( __dsmultTileT), dim3(nblocks),dim3(nthreads), 0, 0, nr, nc, kk, nnz, A, lda, Bdata, Bir, Bic, broff, bcoff, C, ldc);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dsmult_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreads) {
hipLaunchKernelGGL(( __dsmult), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dsmultx_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreadsx, int nthreadsy) {
dim3 threadDim(nthreadsx, nthreadsy, 1);
hipLaunchKernelGGL(( __dsmultx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __dsmultT(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
}
}
}
__global__ void __dsmultTx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[threadIdx.x + nrows * Bic[j]];
}
atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]);
}
}
int dsmultT(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
hipLaunchKernelGGL(( __dsmultTx), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __dsmultT), dim3(nblocks),dim3(nthreads), 0, 0, nrows, nnz, A, Bdata, Bir, Bic, C);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
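// __spsum1/__spsum2 reduce the nonzero values P of a sparse matrix into B with
// atomicAdd, summed per column (n == 1) or per row (otherwise).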
__global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Aic[i]], P[i]);
}
}
__global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Air[i]], P[i]);
}
}
int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B, int n) {
int nthreads = max(32,min(128, nnz));
int nblks = min(65536, max(1, (nnz-1) / 128));
if (n == 1) {
hipLaunchKernelGGL(( __spsum1), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B);
} else {
hipLaunchKernelGGL(( __spsum2), dim3(nblks),dim3(nthreads), 0, 0, nrows, ncols, nnz, Air, Aic, P, B);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr);
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, float initval, int opn);
__global__ void __reduce1iop(int nrows, int ncols, int *A, int *B, int initval, int opn);
__global__ void __reduce1lop(int nrows, int ncols, long long *A, long long *B, long long initval, int opn);
#define DDS_BLKY 32
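// __dds computes, for each listed nonzero position j, the dot product of column
// Cir[j] of A with column Cic[j] of B and accumulates it into P[j]; on sm_30+ the
// per-thread partial sums are combined with __shfl_down warp reductions (and
// atomicAdd across warps), while older architectures fall back to shared memory.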
#if __CUDA_ARCH__ > 200
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
for (int i = 1; i < blockDim.x; i *= 2) {
float tmp = __shfl_down(sum, i);
if (threadIdx.x + i < blockDim.x) sum = sum + tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&P[j], sum);
}
}
}
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {
__shared__ float merge[32];
int jstart = ((long long)blockIdx.x) * ncols / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int aoff, boff;
float user, prod, sum, bsum;
for (int j0 = jstart; j0 < jend ; j0++) {
boff = nrows * j0;
user = B[tid + boff];
for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) {
aoff = nrows * Cir[j];
prod = A[tid + aoff] * user;
sum = prod + __shfl_down(prod, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
bsum = __shfl(sum, 0);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
merge[threadIdx.x] = bsum;
}
__syncthreads();
if (threadIdx.y == 0) {
sum = merge[threadIdx.x];
sum = sum + __shfl_down(sum, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
if (threadIdx.x == 0) {
P[j] = sum;
}
}
}
}
}
#else
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
__shared__ float parts[32*DDS_BLKY];
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
parts[tid] = sum;
for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) {
__syncthreads();
if (i + tid < blockDim.x * blockDim.y) {
parts[tid] = parts[tid] + parts[i + tid];
}
}
__syncthreads();
if (tid == 0) {
P[j] = parts[0];
}
__syncthreads();
}
}
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {}
#endif
int dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,nnz/128));
hipLaunchKernelGGL(( __dds), dim3(nblocks),dim3(blockDims), 0, 0, nrows, nnz, A, B, Cir, Cic, P);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P) {
dim3 blockDims(32, 32, 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,ncols/64));
hipLaunchKernelGGL(( __dds0), dim3(nblocks),dim3(blockDims), 0, 0, nrows, ncols, A, B, Cir, Cic, P);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
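// GENDISTS generates register-tiled all-pairs "distance" kernels: each thread keeps
// a 32-wide strip of the output tile in registers R00..R31 and rotates the B values
// around the warp with __shfl, applying the macro argument DFUNC as the
// accumulation op (L1, L2, Minkowski, max-abs, or max-plus).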
#if __CUDA_ARCH__ > 200
#define GENDISTS(DFNAME,DFUNC) \
__global__ void DFNAME(float *A, int lda, float *B, int ldb, float *C, \
int ldc, int d, int nrows, int ncols, float p) { \
int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \
int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \
float va, vb, vc; \
float R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \
R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \
int xi = threadIdx.x + xblk; \
int yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
} \
yi = threadIdx.x + yblk; \
int nbr = (threadIdx.x + 1) % blockDim.x; \
for (int i = 0; i < d; i++) { \
va = (xi < nrows) ? A[xi + i * lda] : 0; \
vb = (yi < ncols) ? B[yi + i * ldb] : 0; \
vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \
vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \
vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \
vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \
vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \
vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \
vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \
vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \
vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \
vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \
vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \
vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \
vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \
vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \
vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \
vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \
} \
yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \
} \
}
GENDISTS(__l1dist,vc+=abs(va-vb))
GENDISTS(__l2dist,vc+=(va-vb)*(va-vb))
GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p))
GENDISTS(__linfdist,vc=max(vc,abs(va-vb)))
GENDISTS(__msum,vc=max(vc,va+vb))
#else
__global__ void __l1dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Lidist not supported on arch <= 200\n");
}
__global__ void __l2dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, L2dist not supported on arch <= 200\n");
}
__global__ void __minkowskidist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Minkowski distance not supported on arch <= 200\n");
}
__global__ void __linfdist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Max-abs distance not supported on arch <= 200\n");
}
__global__ void __msum(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Max-sum multiply not supported on arch <= 200\n");
}
#endif
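// dists dispatches on p: p == 0 selects the max-abs (L-infinity) kernel, p == 1 the
// L1 kernel, p == 2 the squared-L2 kernel, and any other p the Minkowski kernel
// with exponent p (note __l2dist accumulates squared differences, not their root).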
int dists(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
// hipSetDevice(ithread);
if (p == 0.0f) {
hipLaunchKernelGGL(( __linfdist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 1.0f) {
hipLaunchKernelGGL(( __l1dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 2.0f) {
hipLaunchKernelGGL(( __l2dist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else {
hipLaunchKernelGGL(( __minkowskidist), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
int maxsumx(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
hipLaunchKernelGGL(( __msum), dim3(griddim),dim3(blockdim), 0, 0, A, lda, B, ldb, C, ldc, d, nrows, ncols, 0);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
__global__ void __dmv(float *a, int nrows, int ncols, float *b, float *c) {
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
float accum = 0.0f;
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
accum += a[tx+nrows*ty] * b[ty];
}
atomicAdd(&c[tx], accum);
}
}
#if __CUDA_ARCH__ > 200
__global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
float accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
for (int i = 1; i < blockDim.x; i *= 2) {
float tmp = __shfl_down(accum, i);
if (threadIdx.x + i < blockDim.x) accum += tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&c[ty], accum);
}
}
}
#else
__global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
float accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
atomicAdd(&c[ty], accum);
}
}
#endif
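// __dmv0: tstep is constructed in dmv() as a multiple of nrows, so tx % nrows is
// unchanged by the strided loop and the row index taken after the loop still
// identifies the thread's starting row.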
__global__ void __dmv0(float *a, int nrows, int ncols, int tstep, float *b, float *c) {
float accum = 0.0f;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < tstep) {
for (; tx < nrows*ncols; tx += tstep) {
int icol = tx / nrows;
accum += a[tx] * b[icol];
}
int irow = tx % nrows;
atomicAdd(&c[irow], accum);
}
}
int dmv(float *a, int nrows, int ncols, float *b, float *c, int trans) {
if (trans == 1) {
int ntx = min(32, nrows);
int nty = min(32, ncols);
int nbx = min(256, 1 + nrows/ntx/8);
int nby = min(256, 1 + ncols/nty/2);
dim3 blockdims(ntx,nty,1);
dim3 griddims(nbx,nby,1);
hipLaunchKernelGGL(( __dmvt), dim3(griddims),dim3(blockdims), 0, 0, a, nrows, ncols, b, c);
} else {
int ntx = min(1024, nrows*ncols);
int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32);
int tstep = (ntx*nbx/nrows)*nrows;
hipLaunchKernelGGL(( __dmv0), dim3(nbx),dim3(ntx), 0, 0, a, nrows, ncols, tstep, b, c);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
|
7fd21c38d8879978032691afa12445b45d2be89b.cu
|
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/reverse.h>
#include <thrust/reduce.h>
#include <thrust/merge.h>
#include <thrust/fill.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
__global__ void __dsmult(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[i + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[i + nrows * Bic[j]], sum);
sum = 0;
}
}
}
}
__global__ void __dsmultx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float sum = 0;
for (int j = jstart; j < jend ; j++) {
sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j];
if (j == jend-1 || Bic[j] != Bic[j+1]) {
atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum);
sum = 0;
}
}
}
int dsmult(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
__dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
__dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __dsmultTile(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nr; i += blockDim.x) {
float sum = 0;
int bcold = -1;
int jdone = 0;
for (int j = jstart; j < jend; j++) {
int brow = Bir[j] - broff;
int bcol = Bic[j] - bcoff;
if (brow >= 0 && brow < kk && bcol >= 0 && bcol < nc) {
if (jdone > 0 && bcol != bcold) {
atomicAdd(&C[i + ldc * bcold], sum);
sum = 0;
}
jdone++;
sum += A[i + lda * brow] * Bdata[j];
bcold = bcol;
}
}
if (jdone > 0) atomicAdd(&C[i + ldc * bcold], sum);
}
}
__global__ void __dsmultTilex(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float sum = 0;
int bcold = -1;
int jdone = 0;
if (threadIdx.x < nr) {
for (int j = jstart; j < jend ; j++) {
int brow = Bir[j] - broff;
int bcol = Bic[j] - bcoff;
if (brow >= 0 && brow < kk && bcol >= 0 && bcol < nc) {
if (jdone > 0 && bcol != bcold) {
atomicAdd(&C[threadIdx.x + ldc * bcold], sum);
sum = 0;
}
jdone++;
sum += A[threadIdx.x + lda * brow] * Bdata[j];
bcold = bcol;
}
}
if (jdone > 0) atomicAdd(&C[threadIdx.x + ldc * bcold], sum);
}
}
__global__ void __dsmultTileT(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nr; i += blockDim.x) {
float aval = 0;
int bcold = -1;
for (int j = jstart; j < jend; j++) {
int brow = Bir[j] - broff;
int bcol = Bic[j] - bcoff;
if (brow >= 0 && brow < nc && bcol >= 0 && bcol < nr) {
if (bcol != bcold) {
aval = A[i + lda * bcol];
}
atomicAdd(&C[i + ldc * brow], aval * Bdata[j]);
bcold = bcol;
}
}
}
}
__global__ void __dsmultTileTx(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float aval = 0;
int bcold = -1;
if (threadIdx.x < nr) {
for (int j = jstart; j < jend ; j++) {
int brow = Bir[j] - broff;
int bcol = Bic[j] - bcoff;
if (brow >= 0 && brow < nc && bcol >= 0 && bcol < nr) {
if (bcol != bcold) {
aval = A[threadIdx.x + lda * bcol];
}
atomicAdd(&C[threadIdx.x + ldc * brow], aval * Bdata[j]);
bcold = bcol;
}
}
}
}
int dsmultTile(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
if (nr < 128) {
int nt = max(1, min(nc/2, 256/nr));
dim3 threadDim(nr, nt, 1);
int nblocks = min(MAXXGRID, max(1, nc/nt));
__dsmultTilex<<<nblocks,threadDim>>>(nr, nc, kk, nnz, A, lda, Bdata, Bir, Bic, broff, bcoff, C, ldc);
} else {
int nthreads = min(1024, nr);
int nblocks = min(MAXXGRID, nc);
__dsmultTile<<<nblocks,nthreads>>>(nr, nc, kk, nnz, A, lda, Bdata, Bir, Bic, broff, bcoff, C, ldc);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dsmultTileT(int nr, int nc, int kk, int nnz, float *A, int lda, float *Bdata, int *Bir, int *Bic, int broff, int bcoff, float *C, int ldc) {
if (nr < 128) {
int nt = max(1, min(nc/2, 256/nr));
dim3 threadDim(nr, nt, 1);
int nblocks = min(MAXXGRID, max(1, nc/nt));
__dsmultTileTx<<<nblocks,threadDim>>>(nr, nc, kk, nnz, A, lda, Bdata, Bir, Bic, broff, bcoff, C, ldc);
} else {
int nthreads = min(1024, nr);
int nblocks = min(MAXXGRID, nc);
__dsmultTileT<<<nblocks,nthreads>>>(nr, nc, kk, nnz, A, lda, Bdata, Bir, Bic, broff, bcoff, C, ldc);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dsmult_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreads) {
__dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dsmultx_tune(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C, int nblocks, int nthreadsx, int nthreadsy) {
dim3 threadDim(nthreadsx, nthreadsy, 1);
__dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
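// Product with the transposed sparse matrix, C += A * B^T: for each nonzero B(r, c) the kernel
// scatters A(:, c) * Bdata[j] into column r of C with atomicAdd, reloading A's column only
// when the nonzero's column index changes.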
__global__ void __dsmultT(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = threadIdx.x; i < nrows; i += blockDim.x) {
float aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[i + nrows * Bic[j]];
}
atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]);
}
}
}
__global__ void __dsmultTx(int nrows, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
int bid = threadIdx.y + blockDim.y * blockIdx.x;
int nb = blockDim.y * gridDim.x;
int jstart = ((long long)bid) * nnz / nb;
int jend = ((long long)(bid + 1)) * nnz / nb;
float aval = 0;
for (int j = jstart; j < jend ; j++) {
if (j == jstart || Bic[j-1] != Bic[j]) {
aval = A[threadIdx.x + nrows * Bic[j]];
}
atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]);
}
}
int dsmultT(int nrows, int ncols, int nnz, float *A, float *Bdata, int *Bir, int *Bic, float *C) {
if (nrows < 128) {
int nt = max(1, min(ncols/2, 256/nrows));
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, max(1, ncols/nt));
__dsmultTx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
} else {
int nthreads = min(1024, nrows);
int nblocks = min(MAXXGRID, ncols);
__dsmultT<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
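// Scatter-add the sparse values P into the vector B, keyed by column index (__spsum1) or by
// row index (__spsum2); the host wrapper spsum() selects the variant via its last argument.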
__global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Aic[i]], P[i]);
}
}
__global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) {
atomicAdd(&B[Air[i]], P[i]);
}
}
int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, float *P, float *B, int n) {
int nthreads = max(32,min(128, nnz));
int nblks = min(65536, max(1, (nnz-1) / 128));
if (n == 1) {
__spsum1<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B);
} else {
__spsum2<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P);
__global__ void __reducebin1op(int nrows, int ncols, float *A, float *B, float *C, int opb, int opr);
__global__ void __reduce1op(int nrows, int ncols, float *A, float *B, float initval, int opn);
__global__ void __reduce1iop(int nrows, int ncols, int *A, int *B, int initval, int opn);
__global__ void __reduce1lop(int nrows, int ncols, long long *A, long long *B, long long initval, int opn);
#define DDS_BLKY 32
#if __CUDA_ARCH__ > 200
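// Sampled dense-dense product: for each listed index pair, P[j] accumulates the dot product of
// column Cir[j] of A with column Cic[j] of B; per-thread partial sums are combined with
// __shfl_down across the x lanes and added to P[j] atomically.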
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
for (int i = 1; i < blockDim.x; i *= 2) {
float tmp = __shfl_down(sum, i);
if (threadIdx.x + i < blockDim.x) sum = sum + tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&P[j], sum);
}
}
}
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {
__shared__ float merge[32];
int jstart = ((long long)blockIdx.x) * ncols / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
int aoff, boff;
float user, prod, sum, bsum;
for (int j0 = jstart; j0 < jend ; j0++) {
boff = nrows * j0;
user = B[tid + boff];
for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) {
aoff = nrows * Cir[j];
prod = A[tid + aoff] * user;
sum = prod + __shfl_down(prod, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
bsum = __shfl(sum, 0);
__syncthreads();
if (threadIdx.x == threadIdx.y) {
merge[threadIdx.x] = bsum;
}
__syncthreads();
if (threadIdx.y == 0) {
sum = merge[threadIdx.x];
sum = sum + __shfl_down(sum, 1);
sum = sum + __shfl_down(sum, 2);
sum = sum + __shfl_down(sum, 4);
sum = sum + __shfl_down(sum, 8);
sum = sum + __shfl_down(sum, 16);
if (threadIdx.x == 0) {
P[j] = sum;
}
}
}
}
}
#else
__global__ void __dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
__shared__ float parts[32*DDS_BLKY];
int jstart = ((long long)blockIdx.x) * nnz / gridDim.x;
int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
for (int j = jstart; j < jend ; j++) {
float sum = 0;
int aoff = nrows * Cir[j];
int boff = nrows * Cic[j];
for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) {
sum += A[i + aoff] * B[i + boff];
}
parts[tid] = sum;
for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) {
__syncthreads();
if (i + tid < blockDim.x * blockDim.y) {
parts[tid] = parts[tid] + parts[i + tid];
}
}
__syncthreads();
if (tid == 0) {
P[j] = parts[0];
}
__syncthreads();
}
}
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {}
#endif
int dds(int nrows, int nnz, float *A, float *B, int *Cir, int *Cic, float *P) {
dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,nnz/128));
__dds<<<nblocks,blockDims>>>(nrows, nnz, A, B, Cir, Cic, P);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cic, float *P) {
dim3 blockDims(32, 32, 1);
// int nblocks = min(65536, max(1,nnz/8));
int nblocks = min(16384, max(1,ncols/64));
__dds0<<<nblocks,blockDims>>>(nrows, ncols, A, B, Cir, Cic, P);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
#if __CUDA_ARCH__ > 200
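// GENDISTS expands to a register-blocked all-pairs kernel over the columns of A and B: each
// thread keeps a 32-entry strip of C in registers (R00..R31), streams the d feature rows, and
// applies the per-element update DFUNC while rotating B values across the warp with __shfl.
// The instantiations below yield L1, squared-L2, Minkowski, L-infinity and max-sum kernels.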
#define GENDISTS(DFNAME,DFUNC) \
__global__ void DFNAME(float *A, int lda, float *B, int ldb, float *C, \
int ldc, int d, int nrows, int ncols, float p) { \
int xblk = blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \
int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \
float va, vb, vc; \
float R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \
R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \
int xi = threadIdx.x + xblk; \
int yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \
} \
yi = threadIdx.x + yblk; \
int nbr = (threadIdx.x + 1) % blockDim.x; \
for (int i = 0; i < d; i++) { \
va = (xi < nrows) ? A[xi + i * lda] : 0; \
vb = (yi < ncols) ? B[yi + i * ldb] : 0; \
vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \
vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \
vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \
vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \
vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \
vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \
vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \
vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \
vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \
vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \
vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \
vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \
vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \
vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \
vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \
vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \
} \
yi = threadIdx.x; \
if (xi < nrows) { \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \
if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \
} \
}
GENDISTS(__l1dist,vc+=abs(va-vb))
GENDISTS(__l2dist,vc+=(va-vb)*(va-vb))
GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p))
GENDISTS(__linfdist,vc=max(vc,abs(va-vb)))
GENDISTS(__msum,vc=max(vc,va+vb))
#else
__global__ void __l1dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, L1dist not supported on arch <= 200\n");
}
__global__ void __l2dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, L2dist not supported on arch <= 200\n");
}
__global__ void __minkowskidist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Minkowski distance not supported on arch <= 200\n");
}
__global__ void __linfdist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Max-abs distance not supported on arch <= 200\n");
}
__global__ void __msum(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
printf("Warning, Max-sum multiply not supported on arch <= 200\n");
}
#endif
int dists(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
// cudaSetDevice(ithread);
if (p == 0.0f) {
__linfdist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 1.0f) {
__l1dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else if (p == 2.0f) {
__l2dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
} else {
__minkowskidist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
int maxsumx(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols) {
dim3 blockdim(32,4,4);
dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128);
__msum<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, 0);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
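// Dense matrix-vector product c += A * b (A column-major): threads grid-stride over rows and
// columns, each adding its partial dot product to c[row] with atomicAdd.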
__global__ void __dmv(float *a, int nrows, int ncols, float *b, float *c) {
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
float accum = 0.0f;
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
accum += a[tx+nrows*ty] * b[ty];
}
atomicAdd(&c[tx], accum);
}
}
#if __CUDA_ARCH__ > 200
__global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
float accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
for (int i = 1; i < blockDim.x; i *= 2) {
float tmp = __shfl_down(accum, i);
if (threadIdx.x + i < blockDim.x) accum += tmp;
}
if (threadIdx.x == 0) {
atomicAdd(&c[ty], accum);
}
}
}
#else
__global__ void __dmvt(float *a, int nrows, int ncols, float *b, float *c) {
for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) {
float accum = 0.0f;
for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) {
accum += a[tx+nrows*ty] * b[tx];
}
atomicAdd(&c[ty], accum);
}
}
#endif
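// Non-transposed path used by dmv(): tstep is a multiple of nrows, so a thread striding through
// the column-major array always revisits the same row and can add its accumulated partial dot
// product to c[row] with a single atomicAdd at the end.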
__global__ void __dmv0(float *a, int nrows, int ncols, int tstep, float *b, float *c) {
float accum = 0.0f;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
if (tx < tstep) {
for (; tx < nrows*ncols; tx += tstep) {
int icol = tx / nrows;
accum += a[tx] * b[icol];
}
int irow = tx % nrows;
atomicAdd(&c[irow], accum);
}
}
int dmv(float *a, int nrows, int ncols, float *b, float *c, int trans) {
if (trans == 1) {
int ntx = min(32, nrows);
int nty = min(32, ncols);
int nbx = min(256, 1 + nrows/ntx/8);
int nby = min(256, 1 + ncols/nty/2);
dim3 blockdims(ntx,nty,1);
dim3 griddims(nbx,nby,1);
__dmvt<<<griddims,blockdims>>>(a, nrows, ncols, b, c);
} else {
int ntx = min(1024, nrows*ncols);
int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32);
int tstep = (ntx*nbx/nrows)*nrows;
__dmv0<<<nbx,ntx>>>(a, nrows, ncols, tstep, b, c);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
|
ae7d9dfc9d3de54083fb20ecf89134e6901de8b7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_arr.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
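// Benchmark harness: for every matrix size above, sweep the block-shape table and time a loop
// of 1000 set_arr launches (after 10 warm-up launches), printing [usecs, (block), (size)].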
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float b = 2;
float *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
size_t N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((set_arr), dim3(gridBlock), dim3(threadBlock), 0, 0, b, c, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((set_arr), dim3(gridBlock), dim3(threadBlock), 0, 0, b, c, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((set_arr), dim3(gridBlock), dim3(threadBlock), 0, 0, b, c, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ae7d9dfc9d3de54083fb20ecf89134e6901de8b7.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_arr.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float b = 2;
float *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
size_t N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
set_arr<<<gridBlock,threadBlock>>>(b,c,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
set_arr<<<gridBlock,threadBlock>>>(b,c,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
set_arr<<<gridBlock,threadBlock>>>(b,c,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
aefac09b88b6f4eb2749549cd89f90d061290afd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/prroi_pool_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
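// Precise RoI Pooling forward: each output element (n, c, ph, pw) integrates the input feature
// map over its continuous pooling bin (PrRoIPoolingMatCalculation) and divides by the bin
// area; empty bins produce 0.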
template <typename T>
__global__ void GPUPRROIPoolForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int input_channels, const int height,
const int width, const int output_channels, const int pooled_height,
const int pooled_width, const int* rois_batch_id_data, T* output_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int input_channel = c;
const T* offset_input_data =
input_data +
(roi_batch_id * input_channels + input_channel) * height * width;
if (win_size > static_cast<T>(0.0)) {
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = 0;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
sum_out += PrRoIPoolingMatCalculation(
offset_input_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height, width);
}
}
output_data[i] = sum_out / win_size;
} else {
output_data[i] = 0.;
}
}
}
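// Precise RoI Pooling backward: distributes each output gradient uniformly over its bin into
// the input gradient (PrRoIPoolingMatDistributeDiff) and accumulates the gradient with respect
// to the RoI coordinates via PrRoIPoolingCoorBackward.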
template <typename T>
__global__ void GPUPRROIPoolBackward(
const int nthreads, const T* in_data, const T* input_rois,
const T* output_grad_data, const float spatial_scale,
const int input_channels, const int height, const int width,
const int output_channels, const int pooled_height, const int pooled_width,
const int* rois_batch_id_data, T* input_grad_data, const T* out_data,
T* input_roi_grad_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
int input_channel = c;
int input_offset =
(roi_batch_id * input_channels + input_channel) * height * width;
T* offset_input_grad_data = input_grad_data + input_offset;
const T* offset_output_grad_data = output_grad_data + i;
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T* offset_input_roi_grad_data = input_roi_grad_data + n * 4;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = win_size == static_cast<T>(0.)
? static_cast<T>(0.)
: *offset_output_grad_data / win_size;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
PrRoIPoolingMatDistributeDiff<T>(
offset_input_grad_data, sum_out, h_iter, w_iter, h_iter + 1,
w_iter + 1, max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height, width);
}
}
const T* offset_out_data = out_data + i;
const T* offset_in_data = in_data + input_offset;
PrRoIPoolingCoorBackward<T>(
s_w, e_w, s_h, e_h, width, height, win_start_w, win_start_h, win_end_w,
win_end_h, pw, ph, pooled_width, pooled_height, win_size, spatial_scale,
offset_in_data, offset_out_data, offset_input_roi_grad_data,
offset_output_grad_data);
}
}
template <typename T>
class GPUPRROIPoolOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int input_channels = in_dims[1];
auto output_channels = input_channels;
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
// set rois batch id
framework::Tensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<Tensor>("BatchRoINums");
framework::Tensor batch_index_cpu;
framework::TensorCopySync(*batchroinum, platform::CPUPlace(),
&batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and input(X) batch_size must be the same."));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num, rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be the same."));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace, roi_id_data, cplace, rois_batch_id_data, bytes,
dev_ctx.stream());
// call cuda kernel function
hipLaunchKernelGGL(( GPUPRROIPoolForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_size, in->data<T>(), rois->data<T>(), spatial_scale,
input_channels, height, width, output_channels, pooled_height,
pooled_width, roi_id_data, out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename DeviceContext, typename T>
class GPUPRROIPoolGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Input<framework::Tensor>("Out");
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* input_roi_grad =
ctx.Output<LoDTensor>(framework::GradVarName("ROIs"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
int rois_num = rois->dims()[0];
int input_channels = in->dims()[1];
auto output_channels = input_channels;
int height = in->dims()[2];
int width = in->dims()[3];
if (input_grad || input_roi_grad) {
// set roi batch id
framework::Tensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<Tensor>("BatchRoINums");
framework::Tensor batch_index_cpu;
framework::TensorCopySync(*batchroinum, platform::CPUPlace(),
&batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
PADDLE_ENFORCE_EQ(rois->lod().empty(), false,
platform::errors::InvalidArgument(
"the lod of Input ROIs should not be empty when "
"BatchRoINums is None!"));
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace, roi_id_data, cplace, rois_batch_id_data, bytes,
dev_ctx.stream());
input_grad->mutable_data<T>(ctx.GetPlace());
phi::funcs::SetConstant<DeviceContext, T> set_zero;
set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0));
input_roi_grad->mutable_data<T>(ctx.GetPlace());
set_zero(ctx.cuda_device_context(), input_roi_grad, static_cast<T>(0));
int output_grad_size = output_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
hipLaunchKernelGGL(( GPUPRROIPoolBackward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_grad_size, in->data<T>(), rois->data<T>(),
output_grad->data<T>(), spatial_scale, input_channels, height,
width, output_channels, pooled_height, pooled_width, roi_id_data,
input_grad->mutable_data<T>(ctx.GetPlace()), out->data<T>(),
input_roi_grad->mutable_data<T>(ctx.GetPlace()));
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(prroi_pool, ops::GPUPRROIPoolOpKernel<float>,
ops::GPUPRROIPoolOpKernel<double>);
REGISTER_OP_CUDA_KERNEL(
prroi_pool_grad,
ops::GPUPRROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUPRROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
aefac09b88b6f4eb2749549cd89f90d061290afd.cu
|
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/prroi_pool_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaximumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaximumNumBlocks);
}
template <typename T>
__global__ void GPUPRROIPoolForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int input_channels, const int height,
const int width, const int output_channels, const int pooled_height,
const int pooled_width, const int* rois_batch_id_data, T* output_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int input_channel = c;
const T* offset_input_data =
input_data +
(roi_batch_id * input_channels + input_channel) * height * width;
if (win_size > static_cast<T>(0.0)) {
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = 0;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
sum_out += PrRoIPoolingMatCalculation(
offset_input_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height, width);
}
}
output_data[i] = sum_out / win_size;
} else {
output_data[i] = 0.;
}
}
}
template <typename T>
__global__ void GPUPRROIPoolBackward(
const int nthreads, const T* in_data, const T* input_rois,
const T* output_grad_data, const float spatial_scale,
const int input_channels, const int height, const int width,
const int output_channels, const int pooled_height, const int pooled_width,
const int* rois_batch_id_data, T* input_grad_data, const T* out_data,
T* input_roi_grad_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
// The output is in order (n, c, ph, pw)
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % output_channels;
int n = i / pooled_width / pooled_height / output_channels;
// set roi_batch_id
int roi_batch_id = rois_batch_id_data[n];
int input_channel = c;
int input_offset =
(roi_batch_id * input_channels + input_channel) * height * width;
T* offset_input_grad_data = input_grad_data + input_offset;
const T* offset_output_grad_data = output_grad_data + i;
// [start, end) interval for spatial sampling
const T* offset_input_rois = input_rois + n * 4;
T roi_start_w = static_cast<T>(offset_input_rois[0]) * spatial_scale;
T roi_start_h = static_cast<T>(offset_input_rois[1]) * spatial_scale;
T roi_end_w = static_cast<T>(offset_input_rois[2]) * spatial_scale;
T roi_end_h = static_cast<T>(offset_input_rois[3]) * spatial_scale;
T* offset_input_roi_grad_data = input_roi_grad_data + n * 4;
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(0.0));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(0.0));
// Compute w and h at input feature map
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
T win_start_w = roi_start_w + bin_size_w * pw;
T win_start_h = roi_start_h + bin_size_h * ph;
T win_end_w = win_start_w + bin_size_w;
T win_end_h = win_start_h + bin_size_h;
T win_size = max(static_cast<T>(0.0), bin_size_w * bin_size_h);
int s_w = floor(win_start_w);
int e_w = ceil(win_end_w);
int s_h = floor(win_start_h);
int e_h = ceil(win_end_h);
T sum_out = win_size == static_cast<T>(0.)
? static_cast<T>(0.)
: *offset_output_grad_data / win_size;
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
PrRoIPoolingMatDistributeDiff<T>(
offset_input_grad_data, sum_out, h_iter, w_iter, h_iter + 1,
w_iter + 1, max(win_start_h, static_cast<T>(h_iter)),
max(win_start_w, static_cast<T>(w_iter)),
min(win_end_h, static_cast<T>(h_iter) + static_cast<T>(1.0)),
min(win_end_w, static_cast<T>(w_iter) + static_cast<T>(1.0)),
height, width);
}
}
const T* offset_out_data = out_data + i;
const T* offset_in_data = in_data + input_offset;
PrRoIPoolingCoorBackward<T>(
s_w, e_w, s_h, e_h, width, height, win_start_w, win_start_h, win_end_w,
win_end_h, pw, ph, pooled_width, pooled_height, win_size, spatial_scale,
offset_in_data, offset_out_data, offset_input_roi_grad_data,
offset_output_grad_data);
}
}
template <typename T>
class GPUPRROIPoolOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int input_channels = in_dims[1];
auto output_channels = input_channels;
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
// set rois batch id
framework::Tensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<Tensor>("BatchRoINums");
framework::Tensor batch_index_cpu;
framework::TensorCopySync(*batchroinum, platform::CPUPlace(),
&batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and input(X) batch_size must be the same."));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num, rois_num_with_lod,
platform::errors::InvalidArgument(
"The rois_num from input and lod must be the same."));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace, roi_id_data, cplace, rois_batch_id_data, bytes,
dev_ctx.stream());
// call cuda kernel function
GPUPRROIPoolForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_size, in->data<T>(), rois->data<T>(), spatial_scale,
input_channels, height, width, output_channels, pooled_height,
pooled_width, roi_id_data, out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename DeviceContext, typename T>
class GPUPRROIPoolGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Input<framework::Tensor>("Out");
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* input_roi_grad =
ctx.Output<LoDTensor>(framework::GradVarName("ROIs"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
int rois_num = rois->dims()[0];
int input_channels = in->dims()[1];
auto output_channels = input_channels;
int height = in->dims()[2];
int width = in->dims()[3];
if (input_grad || input_roi_grad) {
// set roi batch id
framework::Tensor rois_batch_id_list;
rois_batch_id_list.Resize({rois_num});
int* rois_batch_id_data =
rois_batch_id_list.mutable_data<int>(platform::CPUPlace());
if (ctx.HasInput("BatchRoINums") || rois->lod().empty()) {
auto* batchroinum = ctx.Input<Tensor>("BatchRoINums");
framework::Tensor batch_index_cpu;
framework::TensorCopySync(*batchroinum, platform::CPUPlace(),
&batch_index_cpu);
int rois_batch_size = batchroinum->dims()[0];
auto* batch_index = batch_index_cpu.data<int64_t>();
size_t c = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int64_t k = 0; k < batch_index[n]; ++k) {
rois_batch_id_data[c] = n;
c = c + 1;
}
}
} else {
PADDLE_ENFORCE_EQ(rois->lod().empty(), false,
platform::errors::InvalidArgument(
"the lod of Input ROIs should not be empty when "
"BatchRoINums is None!"));
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
rois_batch_id_data[i] = n;
}
}
}
auto cplace = platform::CPUPlace();
auto& dev_ctx = ctx.cuda_device_context();
int bytes = rois_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
const auto gplace = ctx.GetPlace();
memory::Copy(gplace, roi_id_data, cplace, rois_batch_id_data, bytes,
dev_ctx.stream());
input_grad->mutable_data<T>(ctx.GetPlace());
phi::funcs::SetConstant<DeviceContext, T> set_zero;
set_zero(ctx.cuda_device_context(), input_grad, static_cast<T>(0));
input_roi_grad->mutable_data<T>(ctx.GetPlace());
set_zero(ctx.cuda_device_context(), input_roi_grad, static_cast<T>(0));
int output_grad_size = output_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
GPUPRROIPoolBackward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_grad_size, in->data<T>(), rois->data<T>(),
output_grad->data<T>(), spatial_scale, input_channels, height,
width, output_channels, pooled_height, pooled_width, roi_id_data,
input_grad->mutable_data<T>(ctx.GetPlace()), out->data<T>(),
input_roi_grad->mutable_data<T>(ctx.GetPlace()));
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(prroi_pool, ops::GPUPRROIPoolOpKernel<float>,
ops::GPUPRROIPoolOpKernel<double>);
REGISTER_OP_CUDA_KERNEL(
prroi_pool_grad,
ops::GPUPRROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUPRROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
b6dc2f855b3cd67890b49d8e15b913455d3c9e4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUDA_SAFE_CALL(func) \
do { \
hipError_t err = (func); \
if (err != hipSuccess) { \
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", hipGetErrorString(err), err, __FILE__, __LINE__); \
exit(err); \
} \
} while (0)
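// Neighbor-averaging Bayer demosaic (pixel layout sketched in the comment inside the kernel):
// each thread interpolates a 2x2 quad and writes packed 3-byte BGR output; source and
// destination indices are clamped so border threads stay in bounds.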
__global__ void
cudaProcessUnsignedChar0(unsigned char *dst, unsigned char *src, int imgW, int imgH)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx * 2;
int y = blockIdx.y*bh + ty * 2;
int px = y * imgW + x;
bool flag = 0 < y && y < (imgH - 2) && 0 < x && x < (imgW - 2);
int sx1 = flag ? px - imgW : 0;
int sx2 = flag ? px - imgW + 1 : 0;
int sx3 = flag ? px - imgW + 2 : 0;
int sx4 = flag ? px - 1 : 0;
int sx5 = flag ? px : 0;
int sx6 = flag ? px + 1 : 0;
int sx7 = flag ? px + 2 : 0;
int sx8 = flag ? px + imgW - 1 : 0;
int sx9 = flag ? px + imgW : 0;
int sxa = flag ? px + imgW + 1 : 0;
int sxb = flag ? px + imgW + 2 : 0;
int sxc = flag ? px + imgW * 2 - 1 : 0;
int sxd = flag ? px + imgW * 2 : 0;
int sxe = flag ? px + imgW * 2 + 1 : 0;
// G0 R0 G1 R1 x0 x1 x2 x3
// B0 G2 B1 G3 x4 x5 x6 x7
// G4 R2 G5 R3 x8 x9 xA xB
// B2 G6 B3 G7 xC xD xE xF
int g1 = (int)src[sx2];
int g2 = (int)src[sx5];
int g3 = (int)src[sx7];
int g4 = (int)src[sx8];
int g5 = (int)src[sxa];
int g6 = (int)src[sxd];
int b0 = (int)src[sx4];
int b1 = (int)src[sx6];
int b2 = (int)src[sxc];
int b3 = (int)src[sxe];
int r0 = (int)src[sx1];
int r1 = (int)src[sx3];
int r2 = (int)src[sx9];
int r3 = (int)src[sxb];
int db0 = (b0 + b1) >> 1;
int dg0 = g2;
int dr0 = (r0 + r1) >> 1;
int db1 = b1;
int dg1 = (g1 + g2 + g3 + g5) >> 2;
int dr1 = (r0 + r1 + r2 + r3) >> 2;
int db2 = (b0 + b1 + b2 + b3) >> 2;
int dg2 = (g2 + g4 + g5 + g6) >> 2;
int dr2 = r2;
int db3 = (b1 + b3) >> 1;
int dg3 = g5;
int dr3 = (r2 + r3) >> 1;
int dx = px * 3;
int dst0 = dx;
int dst1 = dx + 3;
int dst2 = dx + imgW * 3;
int dst3 = dx + (imgW + 1) * 3;
dst[dst0 + 0 < imgW * imgH * 3 ? dst0 + 0 : 0] = (unsigned char)db0;
dst[dst0 + 1 < imgW * imgH * 3 ? dst0 + 1 : 0] = (unsigned char)dg0;
dst[dst0 + 2 < imgW * imgH * 3 ? dst0 + 2 : 0] = (unsigned char)dr0;
dst[dst1 + 0 < imgW * imgH * 3 ? dst1 + 0 : 0] = (unsigned char)db1;
dst[dst1 + 1 < imgW * imgH * 3 ? dst1 + 1 : 0] = (unsigned char)dg1;
dst[dst1 + 2 < imgW * imgH * 3 ? dst1 + 2 : 0] = (unsigned char)dr1;
dst[dst2 + 0 < imgW * imgH * 3 ? dst2 + 0 : 0] = (unsigned char)db2;
dst[dst2 + 1 < imgW * imgH * 3 ? dst2 + 1 : 0] = (unsigned char)dg2;
dst[dst2 + 2 < imgW * imgH * 3 ? dst2 + 2 : 0] = (unsigned char)dr2;
dst[dst3 + 0 < imgW * imgH * 3 ? dst3 + 0 : 0] = (unsigned char)db3;
dst[dst3 + 1 < imgW * imgH * 3 ? dst3 + 1 : 0] = (unsigned char)dg3;
dst[dst3 + 2 < imgW * imgH * 3 ? dst3 + 2 : 0] = (unsigned char)dr3;
}
extern "C" void
launchCudaProcessUnsignedChar(dim3 grid, dim3 block, unsigned char* srcImage, unsigned char* dstImage, int imgW, int imgH)
{
hipLaunchKernelGGL((cudaProcessUnsignedChar0), dim3(grid), dim3(block), 0, 0, dstImage, srcImage, imgW, imgH);
}
|
b6dc2f855b3cd67890b49d8e15b913455d3c9e4a.cu
|
#define CUDA_SAFE_CALL(func) \
do { \
cudaError_t err = (func); \
if (err != cudaSuccess) { \
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", cudaGetErrorString(err), err, __FILE__, __LINE__); \
exit(err); \
} \
} while (0)
__global__ void
cudaProcessUnsignedChar0(unsigned char *dst, unsigned char *src, int imgW, int imgH)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx * 2;
int y = blockIdx.y*bh + ty * 2;
int px = y * imgW + x;
bool flag = 0 < y && y < (imgH - 2) && 0 < x && x < (imgW - 2);
int sx1 = flag ? px - imgW : 0;
int sx2 = flag ? px - imgW + 1 : 0;
int sx3 = flag ? px - imgW + 2 : 0;
int sx4 = flag ? px - 1 : 0;
int sx5 = flag ? px : 0;
int sx6 = flag ? px + 1 : 0;
int sx7 = flag ? px + 2 : 0;
int sx8 = flag ? px + imgW - 1 : 0;
int sx9 = flag ? px + imgW : 0;
int sxa = flag ? px + imgW + 1 : 0;
int sxb = flag ? px + imgW + 2 : 0;
int sxc = flag ? px + imgW * 2 - 1 : 0;
int sxd = flag ? px + imgW * 2 : 0;
int sxe = flag ? px + imgW * 2 + 1 : 0;
// G0 R0 G1 R1 x0 x1 x2 x3
// B0 G2 B1 G3 x4 x5 x6 x7
// G4 R2 G5 R3 x8 x9 xA xB
// B2 G6 B3 G7 xC xD xE xF
int g1 = (int)src[sx2];
int g2 = (int)src[sx5];
int g3 = (int)src[sx7];
int g4 = (int)src[sx8];
int g5 = (int)src[sxa];
int g6 = (int)src[sxd];
int b0 = (int)src[sx4];
int b1 = (int)src[sx6];
int b2 = (int)src[sxc];
int b3 = (int)src[sxe];
int r0 = (int)src[sx1];
int r1 = (int)src[sx3];
int r2 = (int)src[sx9];
int r3 = (int)src[sxb];
int db0 = (b0 + b1) >> 1;
int dg0 = g2;
int dr0 = (r0 + r1) >> 1;
int db1 = b1;
int dg1 = (g1 + g2 + g3 + g5) >> 2;
int dr1 = (r0 + r1 + r2 + r3) >> 2;
int db2 = (b0 + b1 + b2 + b3) >> 2;
int dg2 = (g2 + g4 + g5 + g6) >> 2;
int dr2 = r2;
int db3 = (b1 + b3) >> 1;
int dg3 = g5;
int dr3 = (r2 + r3) >> 1;
int dx = px * 3;
int dst0 = dx;
int dst1 = dx + 3;
int dst2 = dx + imgW * 3;
int dst3 = dx + (imgW + 1) * 3;
dst[dst0 + 0 < imgW * imgH * 3 ? dst0 + 0 : 0] = (unsigned char)db0;
dst[dst0 + 1 < imgW * imgH * 3 ? dst0 + 1 : 0] = (unsigned char)dg0;
dst[dst0 + 2 < imgW * imgH * 3 ? dst0 + 2 : 0] = (unsigned char)dr0;
dst[dst1 + 0 < imgW * imgH * 3 ? dst1 + 0 : 0] = (unsigned char)db1;
dst[dst1 + 1 < imgW * imgH * 3 ? dst1 + 1 : 0] = (unsigned char)dg1;
dst[dst1 + 2 < imgW * imgH * 3 ? dst1 + 2 : 0] = (unsigned char)dr1;
dst[dst2 + 0 < imgW * imgH * 3 ? dst2 + 0 : 0] = (unsigned char)db2;
dst[dst2 + 1 < imgW * imgH * 3 ? dst2 + 1 : 0] = (unsigned char)dg2;
dst[dst2 + 2 < imgW * imgH * 3 ? dst2 + 2 : 0] = (unsigned char)dr2;
dst[dst3 + 0 < imgW * imgH * 3 ? dst3 + 0 : 0] = (unsigned char)db3;
dst[dst3 + 1 < imgW * imgH * 3 ? dst3 + 1 : 0] = (unsigned char)dg3;
dst[dst3 + 2 < imgW * imgH * 3 ? dst3 + 2 : 0] = (unsigned char)dr3;
}
extern "C" void
launchCudaProcessUnsignedChar(dim3 grid, dim3 block, unsigned char* srcImage, unsigned char* dstImage, int imgW, int imgH)
{
cudaProcessUnsignedChar0 <<< grid, block>>>(dstImage, srcImage, imgW, imgH);
}
|
c6d3860f632b810da2112d632b325f4a494d9937.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "common_cuda.h"
#include "chemistry.h"
#include "equilibrium_solver/minimizer_options.h"
#include "equilibrium_solver/equilibrium_state.h"
#include "equilibrium_solver/equilibrium_solver.h"
namespace equilibrium_solver {
// make variables on the device visible
#ifdef __CUDA_ARCH__
using namespace common_device;
#else
using namespace common;
#endif
using chemistry::ThermodynamicProperties;
__global__
void equilibrate_cuda_kernel(ThermodynamicProperties thermo_props,
size_t ncells,
numeric_t* bs_raw,
EquilibriumState* states,
MinimizerOptions options,
bool init,
MinimizationResultInfo* results) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < ncells) {
Eigen::Map<component_amounts_t> b(bs_raw+num_components*idx);
results[idx] = equilibrate(thermo_props, b, *(states+idx), options, init);
}
}
}
|
c6d3860f632b810da2112d632b325f4a494d9937.cu
|
#include "common.h"
#include "common_cuda.h"
#include "chemistry.h"
#include "equilibrium_solver/minimizer_options.h"
#include "equilibrium_solver/equilibrium_state.h"
#include "equilibrium_solver/equilibrium_solver.h"
namespace equilibrium_solver {
// make variables on the device visible
#ifdef __CUDA_ARCH__
using namespace common_device;
#else
using namespace common;
#endif
using chemistry::ThermodynamicProperties;
__global__
void equilibrate_cuda_kernel(ThermodynamicProperties thermo_props,
size_t ncells,
numeric_t* bs_raw,
EquilibriumState* states,
MinimizerOptions options,
bool init,
MinimizationResultInfo* results) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < ncells) {
Eigen::Map<component_amounts_t> b(bs_raw+num_components*idx);
results[idx] = equilibrate(thermo_props, b, *(states+idx), options, init);
}
}
}
|
34eab823905eb046aa501194bf09a175a50589a6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "./utilities/timer.hpp"
#include "./utilities/graph.hpp"
#include "./utilities/gpu_error_check.cuh"
#include "./utilities/global.hpp"
#include "./utilities/argument_parser.hpp"
#include <omp.h>
uint* sssp_CPU_parallel(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
bool *processed = new bool[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
processed[i] = false;
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source) {
if (edge.weight < dist[edge.end]) {
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
Timer timer;
bool finished = false;
uint numIteration = 0;
dist[source] = 0;
preNode[source] = 0;
processed[source] = true;
timer.start();
while(!finished) {
finished = true;
numIteration++;
#pragma omp parallel
{
// #pragma omp master
int threadId = omp_get_thread_num();
int numThreads = omp_get_num_threads();
int numEdgesPerThread = numEdges / numThreads + 1;
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > numEdges) {
start = numEdges;
}
if (end > numEdges) {
end = numEdges;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
// #pragma omp atomic
dist[end] = dist[source] + weight;
// #pragma omp atomic
preNode[end] = source;
finished = false;
}
}
}
}
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on CPU(OpenMP): %f ms\n", timer.elapsedTime());
return dist;
}
__global__ void sssp_GPU_Kernel(int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
// dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
}
}
}
uint* sssp_GPU(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(hipMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(hipMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(hipMemcpy(d_dist, dist, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_preNode, preNode, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), hipMemcpyHostToDevice));
Timer timer;
int numIteration = 0;
int numEdgesPerThread = 8;
int numThreadsPerBlock = 512;
int numBlock = (numEdges) / (numThreadsPerBlock * numEdgesPerThread) + 1;
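// Each thread relaxes a contiguous chunk of numEdgesPerThread edges, so numBlock is sized
// so that numBlock * numThreadsPerBlock * numEdgesPerThread covers every edge at least once.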
bool finished = true;
timer.start();
do {
numIteration++;
finished = true;
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
// TO-DO PARALLEL
hipLaunchKernelGGL(( sssp_GPU_Kernel), dim3(numBlock), dim3(numThreadsPerBlock) , 0, 0, numEdges,
numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(hipPeekAtLastError());
gpuErrorcheck(hipDeviceSynchronize());
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
} while(!finished);
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on GPU: %f ms\n", timer.elapsedTime());
gpuErrorcheck(hipMemcpy(dist, d_dist, numNodes * sizeof(uint), hipMemcpyDeviceToHost));
gpuErrorcheck(hipFree(d_dist));
gpuErrorcheck(hipFree(d_preNode));
gpuErrorcheck(hipFree(d_finished));
gpuErrorcheck(hipFree(d_edgesSource));
gpuErrorcheck(hipFree(d_edgesEnd));
gpuErrorcheck(hipFree(d_edgesWeight));
return dist;
}
__global__ void sssp_Hybrid_GPU_Kernel(int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished,
uint *d_msgToHostIndex,
uint *d_msgToHostNodeId,
uint *d_msgToHostDist,
int numMsgPerThread,
uint numMsg,
uint *d_msgToDeviceNodeId,
uint *d_msgToDeviceDist) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (numMsg > 0) {
int start = threadId * numMsgPerThread;
int end = (threadId + 1) * numMsgPerThread;
if (start > numMsg) start = numMsg;
if (end > numMsg) end = numMsg;
for (int i = start ; i < end; i++) {
int nodeId = d_msgToDeviceNodeId[i];
int updateDist = d_msgToDeviceDist[i];
if (dist[nodeId] > updateDist) {
atomicMin(&dist[nodeId], updateDist);
}
}
}
__syncthreads();
int startId = splitIndex + threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = splitIndex + (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
// printf("GPU: process edged from: %d to %d \n", startId, endId);
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
preNode[end] = source;
*finished = false;
int index = atomicAdd(d_msgToHostIndex, 1);
d_msgToHostNodeId[index] = end;
d_msgToHostDist[index] = dist[end];
}
}
}
void sssp_Hybrid_CPU(int threadId,
int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished,
uint *msgToDeviceIndex,
uint *msgToDeviceNodeId,
uint *msgToDeviceDist,
int numMsg,
int numMsgPerThread,
uint *msgToHostNodeId,
uint *msgToHostDist) {
/* if (numMsg > 0) {
int start = threadId * numMsgPerThread;
int end = (threadId + 1) * numMsgPerThread;
if (start > numMsg) start = numMsg;
if (end > numMsg) end = numMsg;
for (int i = start; i < end; i++) {
int nodeId = msgToHostNodeId[i];
int updateDist = msgToHostDist[i];
if (dist[nodeId] > updateDist) {
dist[nodeId] = updateDist;
}
}
}
for (uint i = 0; i < 1000000000; i ++) ; */
//#pragma omp barrier
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > splitIndex) return;
if (end > splitIndex) {
end = splitIndex;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
uint index;
#pragma omp critical
{
index = *msgToDeviceIndex;
*msgToDeviceIndex = *msgToDeviceIndex + 1;
}
/* #pragma omp atomic capture
{
index = *msgToDeviceIndex;
*msgToDeviceIndex+=1;
} */
msgToDeviceNodeId[index] = end;
msgToDeviceDist[index] = dist[end];
// printf("index:%d nodeId: %d dist: %d\n", index, end, dist[end]);
}
}
}
void sssp_Hybrid_MergeDist(int threadId,
int numNodes,
int numNodesPerThread,
uint *dist,
uint *dist_copy) {
int start = threadId * numNodesPerThread;
int end = (threadId + 1) * numNodesPerThread;
if (start > numNodes) return;
if (end > numNodes) {
end = numNodes;
}
for (int i = start; i < end; i++) {
if (dist[i] > dist_copy[i]) {
dist[i] = dist_copy[i];
}
}
}
void sssp_Hybrid_Host_Process_Message(int threadId,
int numMsg,
int numMsgPerThread,
uint *dist,
uint *msgToHostNodeId,
uint *msgToHostDist) {
int start = threadId * numMsgPerThread;
int end = (threadId + 1) * numMsgPerThread;
if (start > numMsg) return;
if (end > numMsg) {
end = numMsg;
}
for (int i = start; i < end; i++) {
int nodeId = msgToHostNodeId[i];
int updateDist = msgToHostDist[i];
if (dist[nodeId] > updateDist) {
dist[nodeId] = updateDist;
}
}
}
__global__ void sssp_Hybrid_GPU_Process_Message(int numMsgPerThread,
uint numMsg,
uint *dist,
uint *preNode,
uint *d_msgToDeviceNodeId,
uint *d_msgToDeviceDist) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int start = threadId * numMsgPerThread;
int end = (threadId + 1) * numMsgPerThread;
if (start > numMsg) return;
if (end > numMsg) {
end = numMsg;
}
for (int i = start; i < end; i++) {
int nodeId = d_msgToDeviceNodeId[i];
int updateDist = d_msgToDeviceDist[i];
if (dist[nodeId] > updateDist) {
atomicMin(&dist[nodeId], updateDist);
}
}
}
uint* sssp_Hybrid(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
uint *dist_copy = new uint[numNodes];
uint *msgToHostNodeId = new uint[numEdges];
uint *msgToHostDist = new uint[numEdges];
uint msgToHostIndex = 0;
uint *msgToDeviceNodeId = new uint[numEdges];
uint *msgToDeviceDist = new uint[numEdges];
uint msgToDeviceIndex = 0;
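// Note added for readability: msgToDevice* collects the nodes the CPU threads relax in an
// iteration and msgToHost* (mirrored by d_msgToHost* on the device) collects the nodes the
// GPU kernel relaxes; the *Index counters give the number of valid entries. The buffers are
// exchanged after each parallel region so that each side applies the other's distance
// updates at the start of the next iteration.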
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
uint *d_msgToHostNodeId;
uint *d_msgToHostDist;
uint *d_msgToHostIndex;
uint *d_msgToDeviceNodeId;
uint *d_msgToDeviceDist;
uint *d_msgToDeviceIndex;
gpuErrorcheck(hipMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(hipMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_msgToHostNodeId, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_msgToHostDist, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_msgToHostIndex, sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_msgToDeviceNodeId, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_msgToDeviceDist, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_msgToDeviceIndex, sizeof(uint)));
gpuErrorcheck(hipMemcpy(d_dist, dist, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_preNode, preNode, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_msgToHostNodeId, msgToHostNodeId, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_msgToHostDist, msgToHostDist, numEdges * sizeof(uint), hipMemcpyHostToDevice));
// Keep a host-side working copy of dist
memcpy(dist_copy, dist, numNodes * sizeof(uint));
Timer timer;
int numIteration = 0;
bool finished = false;
bool h_finished = false;
float splitRatio; // cpu_data_size / whole_data_size
// Automatically select an initial value of splitRatio based on experience
if (numEdges < 300000) {
splitRatio = 0.95;
} else if (numEdges < 800000) {
splitRatio = 0.7;
} else {
splitRatio = 0.5;
}
/*
CPU process edges from 0 to splitIndex
number of edges: splitIndex
GPU process edges from splitIndex to numEdges
number of edges: numEdges - splitIndex + 1
*/
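// Worked example (illustrative values): with numEdges = 1,000,000 and splitRatio = 0.5,
// splitIndex = 500,000, so the CPU threads relax edges [0, 500000) while the GPU kernel
// relaxes edges [500000, 1000000).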
int splitIndex = numEdges * splitRatio;
int d_numEdgesPerThread = 8;
int d_numThreadsPerBlock = 512;
int d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
Timer timer_cpu, timer_gpu;
Timer timer_cpu_message;
Timer timer_gpu_message;
// Default: enable both cpu and gpu
// Once splitRatio reaches 0, only the gpu is enabled
// Once splitRatio reaches 1, only the cpu is enabled
bool cpu_enable = true;
bool gpu_enable = true;
vector<LoopInfo> infos;
LoopInfo loopInfo;
splitRatio = 0.1;
timer.start();
do {
numIteration++;
finished = true;
h_finished = true;
splitIndex = numEdges * splitRatio;
d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
int d_numMsg = msgToDeviceIndex;
int d_numMsgPerThread = 8;
int h_numMsg = msgToHostIndex;
msgToDeviceIndex = 0;
msgToHostIndex = 0;
timer_gpu.start();
timer_cpu.start();
#pragma omp parallel //num_threads(8)
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
// int h_numMsg = msgToHostIndex;
int h_numMsgPerThread = (h_numMsg) / (h_numThreads) + 1;
sssp_Hybrid_Host_Process_Message(threadId,
h_numMsg,
h_numMsgPerThread,
dist,
msgToHostNodeId,
msgToHostDist);
#pragma omp barrier
if (threadId == (h_numThreads - 1) && splitIndex < numEdges && gpu_enable) {
// Last thread will be used to launch gpu kernel
// if thread 0 is used to launch gpu kernel, the first block of
// data whose index begins at 0 will not be processed.
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
// timer_host_to_device.start();
// gpuErrorcheck(hipMemcpy(d_dist, dist, sizeof(uint) * numNodes, hipMemcpyHostToDevice));
// timer_host_to_device.stop();
gpuErrorcheck(hipMemcpy(d_msgToHostIndex, &msgToHostIndex, sizeof(uint), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( sssp_Hybrid_GPU_Kernel), dim3(d_numBlock), dim3(d_numThreadsPerBlock), 0, 0, splitIndex,
numEdges,
d_numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished,
d_msgToHostIndex,
d_msgToHostNodeId,
d_msgToHostDist,
d_numMsgPerThread,
d_numMsg,
d_msgToDeviceNodeId,
d_msgToDeviceDist);
gpuErrorcheck(hipPeekAtLastError());
gpuErrorcheck(hipDeviceSynchronize());
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
// timer_device_to_host.start();
// gpuErrorcheck(hipMemcpy(dist_copy, d_dist, sizeof(uint) * numNodes, hipMemcpyDeviceToHost));
// timer_device_to_host.stop();
timer_gpu.stop();
} else if (cpu_enable) {
// printf("Sub threads\n");
int h_numEdgesPerThread = (splitIndex) / (h_numThreads - 1) + 1;
int h_numMsgPerThread = (h_numMsg) / (h_numThreads - 1) + 1;
sssp_Hybrid_CPU(threadId,
splitIndex,
numEdges,
h_numEdgesPerThread,
dist,
preNode,
edgesSource,
edgesEnd,
edgesWeight,
&finished,
&msgToDeviceIndex,
msgToDeviceNodeId,
msgToDeviceDist,
h_numMsg,
h_numMsgPerThread,
msgToHostNodeId,
msgToHostDist);
timer_cpu.stop();
}
}
d_numMsg = msgToDeviceIndex;
gpuErrorcheck(hipMemcpy(d_msgToDeviceNodeId, msgToDeviceNodeId, sizeof(uint) * d_numMsg, hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_msgToDeviceDist, msgToDeviceDist, sizeof(uint) * d_numMsg, hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(&msgToHostIndex, d_msgToHostIndex, sizeof(uint), hipMemcpyDeviceToHost));
gpuErrorcheck(hipMemcpy(msgToHostNodeId, d_msgToHostNodeId, sizeof(uint) * msgToHostIndex, hipMemcpyDeviceToHost));
gpuErrorcheck(hipMemcpy(msgToHostDist, d_msgToHostDist, sizeof(uint) * msgToHostIndex, hipMemcpyDeviceToHost));
// printf("msgToDeviceIndex: %d \n", msgToDeviceIndex);
finished = finished && h_finished;
timer_cpu_message.start();
timer_gpu_message.start();
// Merge data
/* #pragma omp parallel
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
if (threadId == (h_numThreads - 1)) {
int d_numMsg = msgToDeviceIndex;
int d_numMsgPerThread = 8;
d_numBlock = (d_numMsg) / (d_numThreadsPerBlock * d_numMsgPerThread) + 1;
gpuErrorcheck(hipMemcpy(d_msgToDeviceNodeId, msgToDeviceNodeId, sizeof(uint) * d_numMsg, hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_msgToDeviceDist, msgToDeviceDist, sizeof(uint) * d_numMsg, hipMemcpyHostToDevice));
sssp_Hybrid_GPU_Process_Message<<< d_numBlock, d_numThreadsPerBlock >>> (d_numMsgPerThread,
d_numMsg,
d_dist,
d_preNode,
d_msgToDeviceNodeId,
d_msgToDeviceDist);
gpuErrorcheck(hipPeekAtLastError());
gpuErrorcheck(hipDeviceSynchronize());
timer_gpu_message.stop();
} else if (threadId != (h_numThreads - 1)) {
int h_numMsg = msgToHostIndex;
int h_numMsgPerThread = (h_numMsg) / (h_numThreads - 1) + 1;
sssp_Hybrid_Host_Process_Message(threadId,
h_numMsg,
h_numMsgPerThread,
dist,
msgToHostNodeId,
msgToHostDist);
timer_cpu_message.stop();
}
} */
// Load Balancing
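// Note added for readability: factor = cpu time / gpu time for this iteration. If the CPU
// was more than 10% slower its share (splitRatio) shrinks by 0.05; if it was more than 10%
// faster its share grows by 0.05. The CPU is disabled once splitRatio reaches 0 and the GPU
// once it reaches 1.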
if (cpu_enable && gpu_enable) {
float factor = (timer_cpu.elapsedTime() / timer_gpu.elapsedTime());
if (factor > 1.1) {
splitRatio = splitRatio - 0.05;
if (splitRatio < 0) {
splitRatio = 0;
cpu_enable = false;
}
} else if (factor < 0.9) {
splitRatio = splitRatio + 0.05;
if (splitRatio > 1) {
splitRatio = 1;
gpu_enable = false;
}
}
// printf("Copy dist from host to device : %f ms \n", timer_host_to_device.elapsedTime());
// printf("Copy dist from device to host : %f ms \n", timer_device_to_host.elapsedTime());
loopInfo.numIteration = numIteration;
loopInfo.time_cpu = timer_cpu.elapsedTime() > 0 ? timer_cpu.elapsedTime() : 0;
loopInfo.time_gpu = timer_gpu.elapsedTime() > 0 ? timer_gpu.elapsedTime() : 0;
loopInfo.time_cpu_message = timer_cpu_message.elapsedTime() > 0 ? timer_cpu_message.elapsedTime() : 0;
loopInfo.time_gpu_message = timer_gpu_message.elapsedTime() > 0 ? timer_gpu_message.elapsedTime() : 0;
loopInfo.splitRatio = splitRatio;
infos.push_back(loopInfo);
}
} while(!finished);
timer.stop();
printLoopInfoV2(infos);
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on Hybrid(CPU-GPU): %f ms\n", timer.elapsedTime());
gpuErrorcheck(hipFree(d_dist));
gpuErrorcheck(hipFree(d_preNode));
gpuErrorcheck(hipFree(d_finished));
gpuErrorcheck(hipFree(d_edgesSource));
gpuErrorcheck(hipFree(d_edgesEnd));
gpuErrorcheck(hipFree(d_edgesWeight));
gpuErrorcheck(hipFree(d_msgToHostNodeId));
gpuErrorcheck(hipFree(d_msgToHostDist));
gpuErrorcheck(hipFree(d_msgToHostIndex));
gpuErrorcheck(hipFree(d_msgToDeviceNodeId));
gpuErrorcheck(hipFree(d_msgToDeviceDist));
gpuErrorcheck(hipFree(d_msgToDeviceIndex));
return dist;
}
int main(int argc, char **argv) {
Timer timer_total, timer_load;
timer_total.start();
ArgumentParser args(argc, argv);
timer_load.start();
Graph graph(args.inputFilePath);
//Graph graph("datasets/simpleGraph.txt");
graph.readGraph();
timer_load.stop();
int sourceNode;
if (args.hasSourceNode) {
sourceNode = args.sourceNode;
} else {
// Use graph default source
sourceNode = graph.defaultSource;
}
uint *dist_hybrid = sssp_Hybrid(&graph, sourceNode);
uint *dist_gpu = sssp_GPU(&graph, sourceNode);
compareResult(dist_hybrid, dist_gpu, graph.numNodes);
if (args.runOnCPU) {
uint *dist_cpu = sssp_CPU_parallel(&graph, sourceNode);
compareResult(dist_cpu, dist_hybrid, graph.numNodes);
}
timer_total.stop();
printf("Total execution time: %f ms\n", timer_total.elapsedTime());
printf("Graph loading execution time: %f ms\n", timer_load.elapsedTime());
return 0;
}
|
34eab823905eb046aa501194bf09a175a50589a6.cu
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "./utilities/timer.hpp"
#include "./utilities/graph.hpp"
#include "./utilities/gpu_error_check.cuh"
#include "./utilities/global.hpp"
#include "./utilities/argument_parser.hpp"
#include <omp.h>
uint* sssp_CPU_parallel(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
bool *processed = new bool[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
processed[i] = false;
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source) {
if (edge.weight < dist[edge.end]) {
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
Timer timer;
bool finished = false;
uint numIteration = 0;
dist[source] = 0;
preNode[source] = 0;
processed[source] = true;
timer.start();
while(!finished) {
finished = true;
numIteration++;
#pragma omp parallel
{
// #pragma omp master
int threadId = omp_get_thread_num();
int numThreads = omp_get_num_threads();
int numEdgesPerThread = numEdges / numThreads + 1;
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > numEdges) {
start = numEdges;
}
if (end > numEdges) {
end = numEdges;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
// #pragma omp atomic
dist[end] = dist[source] + weight;
// #pragma omp atomic
preNode[end] = source;
finished = false;
}
}
}
}
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on CPU(OpenMP): %f ms\n", timer.elapsedTime());
return dist;
}
__global__ void sssp_GPU_Kernel(int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
// dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
}
}
}
uint* sssp_GPU(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(cudaMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(cudaMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMemcpy(d_dist, dist, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_preNode, preNode, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
Timer timer;
int numIteration = 0;
int numEdgesPerThread = 8;
int numThreadsPerBlock = 512;
int numBlock = (numEdges) / (numThreadsPerBlock * numEdgesPerThread) + 1;
bool finished = true;
timer.start();
do {
numIteration++;
finished = true;
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
// TO-DO PARALLEL
sssp_GPU_Kernel<<< numBlock, numThreadsPerBlock >>> (numEdges,
numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(cudaPeekAtLastError());
gpuErrorcheck(cudaDeviceSynchronize());
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
} while(!finished);
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on GPU: %f ms\n", timer.elapsedTime());
gpuErrorcheck(cudaMemcpy(dist, d_dist, numNodes * sizeof(uint), cudaMemcpyDeviceToHost));
gpuErrorcheck(cudaFree(d_dist));
gpuErrorcheck(cudaFree(d_preNode));
gpuErrorcheck(cudaFree(d_finished));
gpuErrorcheck(cudaFree(d_edgesSource));
gpuErrorcheck(cudaFree(d_edgesEnd));
gpuErrorcheck(cudaFree(d_edgesWeight));
return dist;
}
__global__ void sssp_Hybrid_GPU_Kernel(int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished,
uint *d_msgToHostIndex,
uint *d_msgToHostNodeId,
uint *d_msgToHostDist,
int numMsgPerThread,
uint numMsg,
uint *d_msgToDeviceNodeId,
uint *d_msgToDeviceDist) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (numMsg > 0) {
int start = threadId * numMsgPerThread;
int end = (threadId + 1) * numMsgPerThread;
if (start > numMsg) start = numMsg;
if (end > numMsg) end = numMsg;
for (int i = start ; i < end; i++) {
int nodeId = d_msgToDeviceNodeId[i];
int updateDist = d_msgToDeviceDist[i];
if (dist[nodeId] > updateDist) {
atomicMin(&dist[nodeId], updateDist);
}
}
}
__syncthreads();
int startId = splitIndex + threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = splitIndex + (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
// printf("GPU: process edged from: %d to %d \n", startId, endId);
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
preNode[end] = source;
*finished = false;
int index = atomicAdd(d_msgToHostIndex, 1);
d_msgToHostNodeId[index] = end;
d_msgToHostDist[index] = dist[end];
}
}
}
void sssp_Hybrid_CPU(int threadId,
int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished,
uint *msgToDeviceIndex,
uint *msgToDeviceNodeId,
uint *msgToDeviceDist,
int numMsg,
int numMsgPerThread,
uint *msgToHostNodeId,
uint *msgToHostDist) {
/* if (numMsg > 0) {
int start = threadId * numMsgPerThread;
int end = (threadId + 1) * numMsgPerThread;
if (start > numMsg) start = numMsg;
if (end > numMsg) end = numMsg;
for (int i = start; i < end; i++) {
int nodeId = msgToHostNodeId[i];
int updateDist = msgToHostDist[i];
if (dist[nodeId] > updateDist) {
dist[nodeId] = updateDist;
}
}
}
for (uint i = 0; i < 1000000000; i ++) ; */
//#pragma omp barrier
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > splitIndex) return;
if (end > splitIndex) {
end = splitIndex;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
uint index;
#pragma omp critical
{
index = *msgToDeviceIndex;
*msgToDeviceIndex = *msgToDeviceIndex + 1;
}
/* #pragma omp atomic capture
{
index = *msgToDeviceIndex;
*msgToDeviceIndex+=1;
} */
msgToDeviceNodeId[index] = end;
msgToDeviceDist[index] = dist[end];
// printf("index:%d nodeId: %d dist: %d\n", index, end, dist[end]);
}
}
}
void sssp_Hybrid_MergeDist(int threadId,
int numNodes,
int numNodesPerThread,
uint *dist,
uint *dist_copy) {
int start = threadId * numNodesPerThread;
int end = (threadId + 1) * numNodesPerThread;
if (start > numNodes) return;
if (end > numNodes) {
end = numNodes;
}
for (int i = start; i < end; i++) {
if (dist[i] > dist_copy[i]) {
dist[i] = dist_copy[i];
}
}
}
void sssp_Hybrid_Host_Process_Message(int threadId,
int numMsg,
int numMsgPerThread,
uint *dist,
uint *msgToHostNodeId,
uint *msgToHostDist) {
int start = threadId * numMsgPerThread;
int end = (threadId + 1) * numMsgPerThread;
if (start > numMsg) return;
if (end > numMsg) {
end = numMsg;
}
for (int i = start; i < end; i++) {
int nodeId = msgToHostNodeId[i];
int updateDist = msgToHostDist[i];
if (dist[nodeId] > updateDist) {
dist[nodeId] = updateDist;
}
}
}
__global__ void sssp_Hybrid_GPU_Process_Message(int numMsgPerThread,
uint numMsg,
uint *dist,
uint *preNode,
uint *d_msgToDeviceNodeId,
uint *d_msgToDeviceDist) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int start = threadId * numMsgPerThread;
int end = (threadId + 1) * numMsgPerThread;
if (start > numMsg) return;
if (end > numMsg) {
end = numMsg;
}
for (int i = start; i < end; i++) {
int nodeId = d_msgToDeviceNodeId[i];
int updateDist = d_msgToDeviceDist[i];
if (dist[nodeId] > updateDist) {
atomicMin(&dist[nodeId], updateDist);
}
}
}
uint* sssp_Hybrid(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
uint *dist_copy = new uint[numNodes];
uint *msgToHostNodeId = new uint[numEdges];
uint *msgToHostDist = new uint[numEdges];
uint msgToHostIndex = 0;
uint *msgToDeviceNodeId = new uint[numEdges];
uint *msgToDeviceDist = new uint[numEdges];
uint msgToDeviceIndex = 0;
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
uint *d_msgToHostNodeId;
uint *d_msgToHostDist;
uint *d_msgToHostIndex;
uint *d_msgToDeviceNodeId;
uint *d_msgToDeviceDist;
uint *d_msgToDeviceIndex;
gpuErrorcheck(cudaMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(cudaMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_msgToHostNodeId, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_msgToHostDist, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_msgToHostIndex, sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_msgToDeviceNodeId, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_msgToDeviceDist, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_msgToDeviceIndex, sizeof(uint)));
gpuErrorcheck(cudaMemcpy(d_dist, dist, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_preNode, preNode, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_msgToHostNodeId, msgToHostNodeId, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_msgToHostDist, msgToHostDist, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
// Keep a host-side working copy of dist
memcpy(dist_copy, dist, numNodes * sizeof(uint));
Timer timer;
int numIteration = 0;
bool finished = false;
bool h_finished = false;
float splitRatio; // cpu_data_size / whole_data_size
// Automatically select an initial value of splitRatio based on experience
if (numEdges < 300000) {
splitRatio = 0.95;
} else if (numEdges < 800000) {
splitRatio = 0.7;
} else {
splitRatio = 0.5;
}
/*
CPU process edges from 0 to splitIndex
number of edges: splitIndex
GPU process edges from splitIndex to numEdges
number of edges: numEdges - splitIndex + 1
*/
int splitIndex = numEdges * splitRatio;
int d_numEdgesPerThread = 8;
int d_numThreadsPerBlock = 512;
int d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
Timer timer_cpu, timer_gpu;
Timer timer_cpu_message;
Timer timer_gpu_message;
// Default: enable both cpu and gpu
// Once splitRatio reaches 0, only the gpu is enabled
// Once splitRatio reaches 1, only the cpu is enabled
bool cpu_enable = true;
bool gpu_enable = true;
vector<LoopInfo> infos;
LoopInfo loopInfo;
splitRatio = 0.1;
timer.start();
do {
numIteration++;
finished = true;
h_finished = true;
splitIndex = numEdges * splitRatio;
d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
int d_numMsg = msgToDeviceIndex;
int d_numMsgPerThread = 8;
int h_numMsg = msgToHostIndex;
msgToDeviceIndex = 0;
msgToHostIndex = 0;
timer_gpu.start();
timer_cpu.start();
#pragma omp parallel //num_threads(8)
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
// int h_numMsg = msgToHostIndex;
int h_numMsgPerThread = (h_numMsg) / (h_numThreads) + 1;
sssp_Hybrid_Host_Process_Message(threadId,
h_numMsg,
h_numMsgPerThread,
dist,
msgToHostNodeId,
msgToHostDist);
#pragma omp barrier
if (threadId == (h_numThreads - 1) && splitIndex < numEdges && gpu_enable) {
// Last thread will be used to launch gpu kernel
// if thread 0 is used to launch gpu kernel, the first block of
// data whose index begins at 0 will not be processed.
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
// timer_host_to_device.start();
// gpuErrorcheck(cudaMemcpy(d_dist, dist, sizeof(uint) * numNodes, cudaMemcpyHostToDevice));
// timer_host_to_device.stop();
gpuErrorcheck(cudaMemcpy(d_msgToHostIndex, &msgToHostIndex, sizeof(uint), cudaMemcpyHostToDevice));
sssp_Hybrid_GPU_Kernel<<< d_numBlock, d_numThreadsPerBlock>>> (splitIndex,
numEdges,
d_numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished,
d_msgToHostIndex,
d_msgToHostNodeId,
d_msgToHostDist,
d_numMsgPerThread,
d_numMsg,
d_msgToDeviceNodeId,
d_msgToDeviceDist);
gpuErrorcheck(cudaPeekAtLastError());
gpuErrorcheck(cudaDeviceSynchronize());
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
// timer_device_to_host.start();
// gpuErrorcheck(cudaMemcpy(dist_copy, d_dist, sizeof(uint) * numNodes, cudaMemcpyDeviceToHost));
// timer_device_to_host.stop();
timer_gpu.stop();
} else if (cpu_enable) {
// printf("Sub threads\n");
int h_numEdgesPerThread = (splitIndex) / (h_numThreads - 1) + 1;
int h_numMsgPerThread = (h_numMsg) / (h_numThreads - 1) + 1;
sssp_Hybrid_CPU(threadId,
splitIndex,
numEdges,
h_numEdgesPerThread,
dist,
preNode,
edgesSource,
edgesEnd,
edgesWeight,
&finished,
&msgToDeviceIndex,
msgToDeviceNodeId,
msgToDeviceDist,
h_numMsg,
h_numMsgPerThread,
msgToHostNodeId,
msgToHostDist);
timer_cpu.stop();
}
}
d_numMsg = msgToDeviceIndex;
gpuErrorcheck(cudaMemcpy(d_msgToDeviceNodeId, msgToDeviceNodeId, sizeof(uint) * d_numMsg, cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_msgToDeviceDist, msgToDeviceDist, sizeof(uint) * d_numMsg, cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(&msgToHostIndex, d_msgToHostIndex, sizeof(uint), cudaMemcpyDeviceToHost));
gpuErrorcheck(cudaMemcpy(msgToHostNodeId, d_msgToHostNodeId, sizeof(uint) * msgToHostIndex, cudaMemcpyDeviceToHost));
gpuErrorcheck(cudaMemcpy(msgToHostDist, d_msgToHostDist, sizeof(uint) * msgToHostIndex, cudaMemcpyDeviceToHost));
// printf("msgToDeviceIndex: %d \n", msgToDeviceIndex);
finished = finished && h_finished;
timer_cpu_message.start();
timer_gpu_message.start();
// Merge data
/* #pragma omp parallel
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
if (threadId == (h_numThreads - 1)) {
int d_numMsg = msgToDeviceIndex;
int d_numMsgPerThread = 8;
d_numBlock = (d_numMsg) / (d_numThreadsPerBlock * d_numMsgPerThread) + 1;
gpuErrorcheck(cudaMemcpy(d_msgToDeviceNodeId, msgToDeviceNodeId, sizeof(uint) * d_numMsg, cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_msgToDeviceDist, msgToDeviceDist, sizeof(uint) * d_numMsg, cudaMemcpyHostToDevice));
sssp_Hybrid_GPU_Process_Message<<< d_numBlock, d_numThreadsPerBlock >>> (d_numMsgPerThread,
d_numMsg,
d_dist,
d_preNode,
d_msgToDeviceNodeId,
d_msgToDeviceDist);
gpuErrorcheck(cudaPeekAtLastError());
gpuErrorcheck(cudaDeviceSynchronize());
timer_gpu_message.stop();
} else if (threadId != (h_numThreads - 1)) {
int h_numMsg = msgToHostIndex;
int h_numMsgPerThread = (h_numMsg) / (h_numThreads - 1) + 1;
sssp_Hybrid_Host_Process_Message(threadId,
h_numMsg,
h_numMsgPerThread,
dist,
msgToHostNodeId,
msgToHostDist);
timer_cpu_message.stop();
}
} */
// Load Balancing
if (cpu_enable && gpu_enable) {
float factor = (timer_cpu.elapsedTime() / timer_gpu.elapsedTime());
if (factor > 1.1) {
splitRatio = splitRatio - 0.05;
if (splitRatio < 0) {
splitRatio = 0;
cpu_enable = false;
}
} else if (factor < 0.9) {
splitRatio = splitRatio + 0.05;
if (splitRatio > 1) {
splitRatio = 1;
gpu_enable = false;
}
}
// printf("Copy dist from host to device : %f ms \n", timer_host_to_device.elapsedTime());
// printf("Copy dist from device to host : %f ms \n", timer_device_to_host.elapsedTime());
loopInfo.numIteration = numIteration;
loopInfo.time_cpu = timer_cpu.elapsedTime() > 0 ? timer_cpu.elapsedTime() : 0;
loopInfo.time_gpu = timer_gpu.elapsedTime() > 0 ? timer_gpu.elapsedTime() : 0;
loopInfo.time_cpu_message = timer_cpu_message.elapsedTime() > 0 ? timer_cpu_message.elapsedTime() : 0;
loopInfo.time_gpu_message = timer_gpu_message.elapsedTime() > 0 ? timer_gpu_message.elapsedTime() : 0;
loopInfo.splitRatio = splitRatio;
infos.push_back(loopInfo);
}
} while(!finished);
timer.stop();
printLoopInfoV2(infos);
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on Hybrid(CPU-GPU): %f ms\n", timer.elapsedTime());
gpuErrorcheck(cudaFree(d_dist));
gpuErrorcheck(cudaFree(d_preNode));
gpuErrorcheck(cudaFree(d_finished));
gpuErrorcheck(cudaFree(d_edgesSource));
gpuErrorcheck(cudaFree(d_edgesEnd));
gpuErrorcheck(cudaFree(d_edgesWeight));
gpuErrorcheck(cudaFree(d_msgToHostNodeId));
gpuErrorcheck(cudaFree(d_msgToHostDist));
gpuErrorcheck(cudaFree(d_msgToHostIndex));
gpuErrorcheck(cudaFree(d_msgToDeviceNodeId));
gpuErrorcheck(cudaFree(d_msgToDeviceDist));
gpuErrorcheck(cudaFree(d_msgToDeviceIndex));
return dist;
}
int main(int argc, char **argv) {
Timer timer_total, timer_load;
timer_total.start();
ArgumentParser args(argc, argv);
timer_load.start();
Graph graph(args.inputFilePath);
//Graph graph("datasets/simpleGraph.txt");
graph.readGraph();
timer_load.stop();
int sourceNode;
if (args.hasSourceNode) {
sourceNode = args.sourceNode;
} else {
// Use graph default source
sourceNode = graph.defaultSource;
}
uint *dist_hybrid = sssp_Hybrid(&graph, sourceNode);
uint *dist_gpu = sssp_GPU(&graph, sourceNode);
compareResult(dist_hybrid, dist_gpu, graph.numNodes);
if (args.runOnCPU) {
uint *dist_cpu = sssp_CPU_parallel(&graph, sourceNode);
compareResult(dist_cpu, dist_hybrid, graph.numNodes);
}
timer_total.stop();
printf("Total execution time: %f ms\n", timer_total.elapsedTime());
printf("Graph loading execution time: %f ms\n", timer_load.elapsedTime());
return 0;
}
|
1726e71dd03bea3617f2438fcd611cab5291a0bf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __NVCC__
__device__ volatile int PQ[MAX_NODE];
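// Note added for readability: PQ packs K independent binary min-heaps, one per worker.
// Heap id occupies the contiguous slice starting at id * ((N+K-1)/K), PQ_size[id] holds its
// current length, and Cx[] (the f = g + h cost) supplies the ordering key.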
//K in parallel
template <class U>
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
template <class T,class U>
__global__ void A_star_expand(int* off,int* edge,T* W,U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag,int* PQ_size,
int flagDiff,int* diff_off,int* diff_edge,unsigned int* diff_weight,int dE ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop until the lock is acquired
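// Note added for readability: Cx stores f = g + h, so Cx[node] - Hx[node] recovers g(node)
// and the relaxation below checks f(child) > g(node) + W(node,child) + Hx[child].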
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
//diff expand
if(flagDiff){
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start<end){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
//end diff
}//end
}
//K in parallel -- O(N)
template <class U>
__global__ void keepHeapPQ(int* PQ_size,U* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
if(nextFlag[id]==1){
int index = atomicAdd(nvSize,1);
nextV[index]=id;
}
}
}
//for K in parallel
template <class U>
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
template <class U>
__global__ void checkMIN(int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0 ){
int front = id* ( (N+K-1)/K );
int node = PQ[front];
//check if at least one queue minimum is still below Cx[dest]; if so, don't end the a*
if( Cx[node] < Cx[dest] ){
atomicAnd(flagEnd,0);
}
}
}
template <class T, class U>
__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx,
int* rev_offset,int* rev_edges,T* rev_weight,int N,int E,
U* Hx,volatile int* parent,int* parent_old,int* addFlag,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<delEdge){
int node = delEdgesV[id];
//check for the parent and add to nextflag and update the cost
int start = rev_offset[node];
int end = E;
if(node!=N-1)
end = rev_offset[node+1];
//no parent
// writes go to parent; reads always come from parent_old
parent[node] = -1;
Cx[node]=INT_MAX;
addFlag[node]=1;
int cost = INT_MAX;
int opt_parent = -1;
//if any parent can change the cost
while(start< end){
int p = rev_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_weight[start];
int flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor>0){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no lock needed: each vertex appears only once in delEdgesV, so a single thread updates it
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
start = rev_diff_offset[node];
end = dE;
if(node!=N-1)
end = rev_diff_offset[node+1];
while(start< end){
int p = rev_diff_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_diff_weight[start];
int flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no lock needed: each vertex appears only once in delEdgesV, so a single thread updates it
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
//write here
if(cost!=INT_MAX){
Cx[node]=cost;
parent[node]=opt_parent;
}
}
}
//add inserted edges to propogate
template <class T, class U>
__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag,
volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
int node = id;
int start = diff_off[node];
int end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edges[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop until the lock is acquired
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
bool flag_cycle = false;
int ancestor = node;
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
parent[child] = node;
__threadfence();
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
template <class T,class U>
__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
template <class T,class U>
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE,
int* rev_offset,int* rev_edges,T* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p == child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
    Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p ==child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child)
flag_cycle = true;
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
//do in 1 thread
template <class U>
__global__ void insertDest(int* PQ_size,U* Cx,int dest,int* openList){
int id = 0;
int front = 0;
if(openList[dest]==-1){
PQ[front+PQ_size[id]]= dest;
PQ_size[id]+=1;
//add in openList
openList[dest] = id;
if(PQ_size[id]>1){
int index = PQ_size[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
template <class U>
__global__ void getCx(U* Cx,int dest,U* val){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id==0){
*val = Cx[dest];
}
}
#endif
|
1726e71dd03bea3617f2438fcd611cab5291a0bf.cu
|
#ifdef __NVCC__
__device__ volatile int PQ[MAX_NODE];
//K in parallel
template <class U>
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// restructure the heap
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
template <class T,class U>
__global__ void A_star_expand(int* off,int* edge,T* W,U* Hx,int* parent,volatile U* Cx,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag,int* PQ_size,
int flagDiff,int* diff_off,int* diff_edge,unsigned int* diff_weight,int dE ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
}
// expand
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
//diff expand
if(flagDiff){
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start<end){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if( Cx[child] > (Cx[node] - Hx[node])+ diff_weight[start]+ Hx[child] ){
    Cx[child] = (Cx[node] - Hx[node])+ diff_weight[start]+ Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
//end diff
}//end
}
//K in parallel -- O(N)
template <class U>
__global__ void keepHeapPQ(int* PQ_size,U* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
if(nextFlag[id]==1){
int index = atomicAdd(nvSize,1);
nextV[index]=id;
}
}
}
//for K in parallel
template <class U>
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
//if not already present
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
template <class U>
__global__ void checkMIN(int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0 ){
int front = id* ( (N+K-1)/K );
int node = PQ[front];
//if at least one queue head is still cheaper than the destination, don't end the A* search
if( Cx[node] < Cx[dest] ){
atomicAnd(flagEnd,0);
}
}
}
template <class T, class U>
__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx,
int* rev_offset,int* rev_edges,T* rev_weight,int N,int E,
U* Hx,volatile int* parent,int* parent_old,int* addFlag,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<delEdge){
int node = delEdgesV[id];
//check for the parent and add to nextflag and update the cost
int start = rev_offset[node];
int end = E;
if(node!=N-1)
end = rev_offset[node+1];
//no parent
// write in parent read always from old_parent
parent[node] = -1;
Cx[node]=INT_MAX;
addFlag[node]=1;
int cost = INT_MAX;
int opt_parent = -1;
//if any parent can change the cost
while(start< end){
int p = rev_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_weight[start];
int flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor>0){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no need to lock only single parent so only one node in array so one node per thread
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
start = rev_diff_offset[node];
end = dE;
if(node!=N-1)
end = rev_diff_offset[node+1];
while(start< end){
int p = rev_diff_edges[start];
//del edges
if(p<0 || p==node){
start++;
continue;
}
int weight = rev_diff_weight[start];
int flag_cycle = false;
//check parent doesn't contain node
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==node){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
//no need to lock only single parent so only one node in array so one node per thread
if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
cost = (Cx[p]-Hx[p] )+weight+Hx[node];
opt_parent = p;
}
start++;
}
//write here
if(cost!=INT_MAX){
Cx[node]=cost;
parent[node]=opt_parent;
}
}
}
//add inserted edges to propagate
template <class T, class U>
__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag,
volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < N){
int node = id;
int start = diff_off[node];
int end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edges[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
bool flag_cycle = false;
int ancestor = node;
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
parent[child] = node;
__threadfence();
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
template <class T,class U>
__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
//critical section
if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
template <class T,class U>
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx,
int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* diff_off,int* diff_edge,T* diff_W,int dE,
int* rev_offset,int* rev_edges,T* rev_weight,
int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end ){
int child = edge[start];
if(child<0){
start++;
continue;
}
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p == child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
bool flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
start = diff_off[node];
end = dE;
if(node!=N-1)
end = diff_off[node+1];
while(start < end ){
int child = diff_edge[start];
//deleted edges
if(child<0){
start++;
continue;
}
//array L initialized with 0
//get the lock for child to update C(x)
//loop till acquire the lock
bool leaveLoop = false;
while(!leaveLoop){
if(atomicCAS(&lock[child],0,1)==0){
if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){
    Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){
//use back edges
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = INT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p ==child){
rstart++;
continue;
}
int weight = rev_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child)
flag_cycle = true;
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
rstart = rev_diff_offset[child];
rend = dE;
if(child!=N-1)
rend = rev_diff_offset[child+1];
while(rstart < rend){
int p = rev_diff_edges[rstart];
if(p<0 || p==child){
rstart++;
continue;
}
int weight = rev_diff_weight[rstart];
int flag_cycle = false;
//check parent doesn't contain child
int ancestor = parent_old[p];
while(ancestor!=-1){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}
}
//do in 1 thread
template <class U>
__global__ void insertDest(int* PQ_size,U* Cx,int dest,int* openList){
int id = 0;
int front = 0;
if(openList[dest]==-1){
PQ[front+PQ_size[id]]= dest;
PQ_size[id]+=1;
//add in openList
openList[dest] = id;
if(PQ_size[id]>1){
int index = PQ_size[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
template <class U>
__global__ void getCx(U* Cx,int dest,U* val){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id==0){
*val = Cx[dest];
}
}
#endif
|
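The A* kernels above consume the graph through two CSR-style layouts: the base arrays off/edge/W, and a second set diff_off/diff_edge/diff_W holding edges inserted after the initial build, while deleted edges keep their slot with a negative endpoint that every loop skips via if(child<0). The following host-side sketch (the graph, the variable names, and the deletion of edge 0->2 are illustrative assumptions, not taken from the original sources) builds both layouts for a tiny four-node graph and walks them with the same bound rule the kernels use: end = off[node+1], except for the last node, which uses E.

#include <cstdio>
#include <vector>

int main() {
    // Tiny 4-node graph stored CSR-style: off[v]..end indexes into edge/W.
    // Edge list: 0->1 (w=2), 0->2 (w=5), 1->3 (w=1), 2->3 (w=7)
    int N = 4;
    std::vector<int> off  = {0, 2, 3, 4};   // N entries; the last node's range ends at E
    std::vector<int> edge = {1, 2, 3, 3};
    std::vector<int> W    = {2, 5, 1, 7};
    int E = (int)edge.size();

    // "diff" arrays hold edges inserted after the initial build, in the same CSR form.
    // Here node 1 gains one extra edge 1->2 (w=4); the other nodes have no diff edges.
    std::vector<int> diff_off  = {0, 0, 1, 1};
    std::vector<int> diff_edge = {2};
    std::vector<int> diff_W    = {4};
    int dE = (int)diff_edge.size();

    // A deleted edge keeps its slot but gets a negative endpoint, which the kernels skip.
    edge[1] = -1;   // delete 0->2

    for (int v = 0; v < N; v++) {
        int end = (v != N - 1) ? off[v + 1] : E;          // same bound rule as the kernels
        for (int s = off[v]; s < end; s++)
            if (edge[s] >= 0) printf("%d -> %d (w=%d)\n", v, edge[s], W[s]);
        int dend = (v != N - 1) ? diff_off[v + 1] : dE;
        for (int s = diff_off[v]; s < dend; s++)
            if (diff_edge[s] >= 0) printf("%d -> %d (w=%d, inserted)\n", v, diff_edge[s], diff_W[s]);
    }
    return 0;
}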
eb15cc7130eda24d2731a86c3594b2fdb468f0fc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "matmul_integer.cuh"
#include <hipcub/hipcub.hpp>
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
template <int TPB>
__global__ void ReduceRowSumOnMatrixAKernel(const int8_t* matrix, int32_t* row_sum, const int8_t* offset, int32_t K) {
int32_t thread_data = 0;
const int8_t* row_ptr = matrix + blockIdx.x * K;
for (int i = threadIdx.x; i < K; i += TPB) {
thread_data += *(row_ptr + i);
}
using BlockReduce = hipcub::BlockReduce<int32_t, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int32_t sum = BlockReduce(temp_storage).Sum(thread_data);
if (threadIdx.x == 0) {
row_sum[blockIdx.x] = (*offset) * sum;
}
}
Status ReduceRowSumOnMatrixA(const int8_t* matrix, int32_t* row_sum, const int8_t* offset, const MatMulComputeHelper& helper) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
hipLaunchKernelGGL(( ReduceRowSumOnMatrixAKernel<static_cast<int>(GridDim::maxThreadsPerBlock)>)
, dim3(static_cast<int>(helper.M())), dim3(GridDim::maxThreadsPerBlock), 0, 0, matrix + helper.LeftOffsets()[batch],
row_sum + batch * helper.M(),
offset,
static_cast<int>(helper.K()));
}
return CUDA_CALL( hipPeekAtLastError() ) ? Status::OK() : Status( common::ONNXRUNTIME, common::FAIL );
}
template <int TPB>
__global__ void ReduceColSumOnMatrixBKernel(const int8_t* matrix, int32_t* col_sum, const int8_t* offset, int32_t row, int32_t col) {
int32_t thread_data = 0;
const int8_t* col_ptr = matrix + blockIdx.x;
for (int i = threadIdx.x; i < row; i += TPB) {
thread_data += *(col_ptr + i * col);
}
using BlockReduce = hipcub::BlockReduce<int32_t, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int32_t sum = BlockReduce(temp_storage).Sum(thread_data);
if (threadIdx.x == 0) {
col_sum[blockIdx.x] = (*offset) * sum;
}
}
Status ReduceColSumOnMatrixB(const int8_t* matrix, int32_t* col_sum, const int8_t* offset, const MatMulComputeHelper& helper) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
hipLaunchKernelGGL(( ReduceColSumOnMatrixBKernel<static_cast<int>(GridDim::maxThreadsPerBlock)>)
, dim3(static_cast<int>(helper.N())), dim3(GridDim::maxThreadsPerBlock), 0, 0, matrix + helper.RightOffsets()[batch],
col_sum + batch * helper.N(),
offset,
static_cast<int32_t>(helper.K()),
static_cast<int32_t>(helper.N()));
}
return CUDA_CALL( hipPeekAtLastError() ) ? Status::OK() : Status( common::ONNXRUNTIME, common::FAIL );
}
__global__ void ComputeOffsetOfMatrixAB(const int32_t* row_sum,
const int32_t* col_sum,
int32_t* output,
const int8_t* a_offset,
const int8_t* b_offset,
int32_t K,
int32_t N) {
for (int32_t i = threadIdx.x; i < N; i += blockDim.x) {
*(output + blockIdx.x * N + i) = K * (*a_offset) * (*b_offset) - row_sum[blockIdx.x] - col_sum[i];
}
}
__global__ void ComputeOffsetOfMatrixA(const int32_t* col_sum,
int32_t* output,
int32_t N) {
for (int32_t i = threadIdx.x; i < N; i += blockDim.x) {
*(output + blockIdx.x * N + i) = -col_sum[i];
}
}
__global__ void ComputeOffsetOfMatrixB(const int32_t* row_sum,
int32_t* output,
int32_t N) {
for (int32_t i = threadIdx.x; i < N; i += blockDim.x) {
*(output + blockIdx.x * N + i) = -row_sum[blockIdx.x];
}
}
Status OffsetOutput(const int32_t* row_sum,
const int32_t* col_sum,
int32_t* output,
const int8_t* a_offset,
const int8_t* b_offset,
const MatMulComputeHelper& helper) {
if (a_offset && b_offset) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
hipLaunchKernelGGL(( ComputeOffsetOfMatrixAB), dim3(static_cast<int>(helper.M())), dim3(GridDim::maxThreadsPerBlock), 0, 0,
row_sum + batch * helper.M(),
col_sum + batch * helper.N(),
output + helper.OutputOffsets()[batch],
a_offset,
b_offset,
static_cast<int32_t>(helper.K()),
static_cast<int32_t>(helper.N()));
}
} else if (a_offset) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
hipLaunchKernelGGL(( ComputeOffsetOfMatrixA), dim3(static_cast<int>(helper.M())), dim3(GridDim::maxThreadsPerBlock), 0, 0,
col_sum + batch * helper.N(),
output + helper.OutputOffsets()[batch],
static_cast<int32_t>(helper.N()));
}
} else if (b_offset) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
hipLaunchKernelGGL(( ComputeOffsetOfMatrixB), dim3(static_cast<int>(helper.M())), dim3(GridDim::maxThreadsPerBlock), 0, 0,
row_sum + batch * helper.M(),
output + helper.OutputOffsets()[batch],
static_cast<int32_t>(helper.N()));
}
}
return CUDA_CALL( hipPeekAtLastError() ) ? Status::OK() : Status( common::ONNXRUNTIME, common::FAIL );
}
__global__ void PadMatrixInLeadingDimensionKernel(const int8_t* src, int8_t* dst, int col_src, int col_dst) {
for (int32_t i = threadIdx.x; i < col_src; i += blockDim.x) {
*(dst + blockIdx.x * col_dst + i) = *(src + blockIdx.x * col_src + i);
}
}
Status PadMatrixInLeadingDimension(const int8_t* src, int8_t* dst, int64_t row, int64_t col, int64_t pad_size) {
hipLaunchKernelGGL(( PadMatrixInLeadingDimensionKernel), dim3(static_cast<int>(row)), dim3(GridDim::maxThreadsPerBlock), 0, 0,
src,
dst,
static_cast<int>(col),
static_cast<int>(col + pad_size));
return CUDA_CALL(hipPeekAtLastError()) ? Status::OK() : Status(common::ONNXRUNTIME, common::FAIL);
;
}
} // namespace cuda
} // namespace onnxruntime
|
eb15cc7130eda24d2731a86c3594b2fdb468f0fc.cu
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "matmul_integer.cuh"
#include <cub/cub.cuh>
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
template <int TPB>
__global__ void ReduceRowSumOnMatrixAKernel(const int8_t* matrix, int32_t* row_sum, const int8_t* offset, int32_t K) {
int32_t thread_data = 0;
const int8_t* row_ptr = matrix + blockIdx.x * K;
for (int i = threadIdx.x; i < K; i += TPB) {
thread_data += *(row_ptr + i);
}
using BlockReduce = cub::BlockReduce<int32_t, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int32_t sum = BlockReduce(temp_storage).Sum(thread_data);
if (threadIdx.x == 0) {
row_sum[blockIdx.x] = (*offset) * sum;
}
}
Status ReduceRowSumOnMatrixA(const int8_t* matrix, int32_t* row_sum, const int8_t* offset, const MatMulComputeHelper& helper) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
ReduceRowSumOnMatrixAKernel<static_cast<int>(GridDim::maxThreadsPerBlock)>
<<<static_cast<int>(helper.M()), GridDim::maxThreadsPerBlock, 0>>>(matrix + helper.LeftOffsets()[batch],
row_sum + batch * helper.M(),
offset,
static_cast<int>(helper.K()));
}
return CUDA_CALL( cudaPeekAtLastError() ) ? Status::OK() : Status( common::ONNXRUNTIME, common::FAIL );
}
template <int TPB>
__global__ void ReduceColSumOnMatrixBKernel(const int8_t* matrix, int32_t* col_sum, const int8_t* offset, int32_t row, int32_t col) {
int32_t thread_data = 0;
const int8_t* col_ptr = matrix + blockIdx.x;
for (int i = threadIdx.x; i < row; i += TPB) {
thread_data += *(col_ptr + i * col);
}
using BlockReduce = cub::BlockReduce<int32_t, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int32_t sum = BlockReduce(temp_storage).Sum(thread_data);
if (threadIdx.x == 0) {
col_sum[blockIdx.x] = (*offset) * sum;
}
}
Status ReduceColSumOnMatrixB(const int8_t* matrix, int32_t* col_sum, const int8_t* offset, const MatMulComputeHelper& helper) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
ReduceColSumOnMatrixBKernel<static_cast<int>(GridDim::maxThreadsPerBlock)>
<<<static_cast<int>(helper.N()), GridDim::maxThreadsPerBlock, 0>>>(matrix + helper.RightOffsets()[batch],
col_sum + batch * helper.N(),
offset,
static_cast<int32_t>(helper.K()),
static_cast<int32_t>(helper.N()));
}
return CUDA_CALL( cudaPeekAtLastError() ) ? Status::OK() : Status( common::ONNXRUNTIME, common::FAIL );
}
__global__ void ComputeOffsetOfMatrixAB(const int32_t* row_sum,
const int32_t* col_sum,
int32_t* output,
const int8_t* a_offset,
const int8_t* b_offset,
int32_t K,
int32_t N) {
for (int32_t i = threadIdx.x; i < N; i += blockDim.x) {
*(output + blockIdx.x * N + i) = K * (*a_offset) * (*b_offset) - row_sum[blockIdx.x] - col_sum[i];
}
}
__global__ void ComputeOffsetOfMatrixA(const int32_t* col_sum,
int32_t* output,
int32_t N) {
for (int32_t i = threadIdx.x; i < N; i += blockDim.x) {
*(output + blockIdx.x * N + i) = -col_sum[i];
}
}
__global__ void ComputeOffsetOfMatrixB(const int32_t* row_sum,
int32_t* output,
int32_t N) {
for (int32_t i = threadIdx.x; i < N; i += blockDim.x) {
*(output + blockIdx.x * N + i) = -row_sum[blockIdx.x];
}
}
Status OffsetOutput(const int32_t* row_sum,
const int32_t* col_sum,
int32_t* output,
const int8_t* a_offset,
const int8_t* b_offset,
const MatMulComputeHelper& helper) {
if (a_offset && b_offset) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
ComputeOffsetOfMatrixAB<<<static_cast<int>(helper.M()), GridDim::maxThreadsPerBlock, 0>>>(
row_sum + batch * helper.M(),
col_sum + batch * helper.N(),
output + helper.OutputOffsets()[batch],
a_offset,
b_offset,
static_cast<int32_t>(helper.K()),
static_cast<int32_t>(helper.N()));
}
} else if (a_offset) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
ComputeOffsetOfMatrixA<<<static_cast<int>(helper.M()), GridDim::maxThreadsPerBlock, 0>>>(
col_sum + batch * helper.N(),
output + helper.OutputOffsets()[batch],
static_cast<int32_t>(helper.N()));
}
} else if (b_offset) {
for (size_t batch = 0; batch < helper.OutputOffsets().size(); batch++) {
ComputeOffsetOfMatrixB<<<static_cast<int>(helper.M()), GridDim::maxThreadsPerBlock, 0>>>(
row_sum + batch * helper.M(),
output + helper.OutputOffsets()[batch],
static_cast<int32_t>(helper.N()));
}
}
return CUDA_CALL( cudaPeekAtLastError() ) ? Status::OK() : Status( common::ONNXRUNTIME, common::FAIL );
}
__global__ void PadMatrixInLeadingDimensionKernel(const int8_t* src, int8_t* dst, int col_src, int col_dst) {
for (int32_t i = threadIdx.x; i < col_src; i += blockDim.x) {
*(dst + blockIdx.x * col_dst + i) = *(src + blockIdx.x * col_src + i);
}
}
Status PadMatrixInLeadingDimension(const int8_t* src, int8_t* dst, int64_t row, int64_t col, int64_t pad_size) {
PadMatrixInLeadingDimensionKernel<<<static_cast<int>(row), GridDim::maxThreadsPerBlock, 0>>>(
src,
dst,
static_cast<int>(col),
static_cast<int>(col + pad_size));
return CUDA_CALL(cudaPeekAtLastError()) ? Status::OK() : Status(common::ONNXRUNTIME, common::FAIL);
;
}
} // namespace cuda
} // namespace onnxruntime
|
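The ComputeOffsetOfMatrix* kernels above implement the zero-point correction of an int8 GEMM: the raw integer product is shifted by K*a_offset*b_offset minus the pre-scaled row sums of A and column sums of B. Which zero point multiplies which sum is decided by whatever the caller passes as the offset argument of the reduction kernels; the scalar sketch below assumes the usual pairing (row sums of A scaled by the B zero point, column sums of B scaled by the A zero point) and simply checks the identity on a single dot product. All names are illustrative.

#include <cstdio>
#include <cstdint>

int main() {
    // 1x1 output, K = 3: one dot product is enough to check the identity.
    const int K = 3;
    int8_t A[K] = {1, -2, 3};
    int8_t B[K] = {4, 5, -6};
    int8_t za = 2, zb = -1;   // zero points of A and B

    int32_t raw = 0, ref = 0, rowSum = 0, colSum = 0;
    for (int k = 0; k < K; k++) {
        raw += (int32_t)A[k] * B[k];                         // plain int8 GEMM
        ref += ((int32_t)A[k] - za) * ((int32_t)B[k] - zb);  // the value we actually want
        rowSum += A[k];
        colSum += B[k];
    }
    // Mirrors ComputeOffsetOfMatrixAB: K*za*zb - zb*rowSum(A) - za*colSum(B)
    int32_t offset = K * za * zb - zb * rowSum - za * colSum;
    printf("raw=%d offset=%d corrected=%d reference=%d\n", raw, offset, raw + offset, ref);
    return 0;
}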
c274fbf89e100898fd51dc3277d90983e45e0a62.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <stdint.h> //uint32_t is an unsigned 4-byte integer
#include <stdlib.h> //for memory management routines
#include <hip/hip_runtime.h>
/*symbolic constants for the image width and height*/
#define width 1024
#define heigth 1024
/*bmp structures*/
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER{ //BITMAPFILEHEADER comes at the head of the file; its size is 14 bytes
    unsigned short bfType; //bfType holds "BM" to mark the file as bmp
    uint32_t bfSize; //bfSize is the total file size in bytes
    unsigned short bfReserved1; //bfReserved1 and 2 are reserved fields, set to 0
    unsigned short bfReserved2;
    uint32_t bf0ffBits; //bf0ffBits is the byte offset from the file start to the pixel data
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER{ //BITMAPINFOHEADER holds the bmp image information; its size is 40 bytes
    uint32_t biSize; //size of this header
    uint32_t biWidth; //image width in pixels
    uint32_t biHeight; //image height in pixels
    unsigned short biPlanes; //always 1
    unsigned short biBitCount; //bits per pixel; 8 here
    uint32_t biCompression; //compression type; 0 for uncompressed bmp
    uint32_t biSizeImage; //size of the bmp pixel array; 0 is allowed when biCompression=0
    uint32_t biXPelsPerMeter; //biXPelsPerMeter and biYPelsPerMeter are normally 0
    uint32_t biYPelsPerMeter;
    uint32_t biCirUsed; //0
    uint32_t biCirImportant; //0
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
/*kernel that accumulates the CGH (computer-generated hologram) fringe pattern*/
__global__ void func_cgh_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d){
int i, j, k;
float xx, yy, zz, rr;
j=blockDim.x*blockIdx.x+threadIdx.x; //replaces the loop over width
i=blockDim.y*blockIdx.y+threadIdx.y; //replaces the loop over heigth
//constants needed for the computation
float interval=10.5F; //pixel pitch
float wave_len=0.633F; //light wavelength
float wave_num=2.0*M_PI/wave_len; //wave number
float cnst=interval*wave_num;
for(k=0; k<284; k++){
xx=((float)j-x_d[k])*((float)j-x_d[k]);
yy=((float)i-y_d[k])*((float)i-y_d[k]);
zz=z_d[k]*z_d[k];
rr=sqrt(xx+yy+zz);
lumi_intensity_d[i*width+j]=lumi_intensity_d[i*width+j]+cosf(cnst*rr);
}
}
/*arrays used to build the image*/
float lumi_intensity[width*heigth]; //light-intensity buffer
unsigned char img[width*heigth]; //bmp pixel buffer
/*main*/
int main(){
BITMAPFILEHEADER bmpFh;
BITMAPINFOHEADER bmpIh;
RGBQUAD rgbQ[256];
/*host-side variables*/
int i, j;
int points; //number of object points
float min, max, mid; //used for binarization
FILE *fp, *fp1;
/*fill in the BITMAPFILEHEADER*/
bmpFh.bfType =19778; //'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778
bmpFh.bfSize =14+40+1024+(width*heigth); //1024 is the color palette: 256 entries of 4 bytes each
bmpFh.bfReserved1 =0;
bmpFh.bfReserved2 =0;
bmpFh.bf0ffBits =14+40+1024;
/*BITMAPINFOHEADER*/
bmpIh.biSize =40;
bmpIh.biWidth =width;
bmpIh.biHeight =heigth;
bmpIh.biPlanes =1;
bmpIh.biBitCount =8;
bmpIh.biCompression =0;
bmpIh.biSizeImage =0;
bmpIh.biXPelsPerMeter =0;
bmpIh.biYPelsPerMeter =0;
bmpIh.biCirUsed =0;
bmpIh.biCirImportant =0;
/*RGBQUAD*/
for(i=0; i<256; i++){
rgbQ[i].rgbBlue =i;
rgbQ[i].rgbGreen =i;
rgbQ[i].rgbRed =i;
rgbQ[i].rgbReserved =0;
}
/*read the 3D point-cloud file*/
fp=fopen("cube284.3d","rb"); //open in binary mode
fread(&points, sizeof(int), 1, fp); //read the number of points
printf("the number of points is %d\n", points);
//arrays that receive the object points
int x[points]; //these arrays can only be sized once the point count has been read
int y[points];
float z[points];
int x_buf, y_buf, z_buf; //temporaries for reading one record
/*scale each point and position it relative to the hologram plane*/
for(i=0; i<points; i++){
fread(&x_buf, sizeof(int), 1, fp);
fread(&y_buf, sizeof(int), 1, fp);
fread(&z_buf, sizeof(int), 1, fp);
x[i]=x_buf*40+width*0.5; //spread the points by scaling by 40 and re-center on the hologram
y[i]=y_buf*40+heigth*0.5;
z[i]=((float)z_buf)*40+100000.0F;
}
fclose(fp);
/*device-side variables*/
int *x_d, *y_d;
float *z_d;
float *lumi_intensity_d;
dim3 block(32,32,1); //block size (threads per block)
dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); //grid size (number of blocks)
// dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1);
/*allocate device memory*/
hipMalloc((void**)&x_d, points*sizeof(int));
hipMalloc((void**)&y_d, points*sizeof(int));
hipMalloc((void**)&z_d, points*sizeof(float));
hipMalloc((void**)&lumi_intensity_d, width*heigth*sizeof(float));
/*copy host data to the device*/
hipMemcpy(x_d, x, points*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(y_d, y, points*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(z_d, z, points*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(lumi_intensity_d, lumi_intensity, width*heigth*sizeof(float), hipMemcpyHostToDevice);
/*launch the kernel*/
hipLaunchKernelGGL(( func_cgh_gpu), dim3(grid), dim3(block) , 0, 0, x_d, y_d, z_d, lumi_intensity_d);
/*copy the result back to the host*/
hipMemcpy(lumi_intensity, lumi_intensity_d, width*heigth*sizeof(float), hipMemcpyDeviceToHost);
/*free device memory*/
hipFree(x_d);
hipFree(y_d);
hipFree(z_d);
hipFree(lumi_intensity_d);
//seed min and max with lumi_intensity[0] so the comparisons below have a starting value
min=lumi_intensity[0];
max=lumi_intensity[0];
/*find the minimum and maximum intensity*/
for(i=0; i<heigth; i++){
for(j=0; j<width; j++){
if(min>lumi_intensity[i*width+j]){
min=lumi_intensity[i*width+j];
}
if(max<lumi_intensity[i*width+j]){
max=lumi_intensity[i*width+j];
}
}
}
mid=(min+max)*0.5F; //midpoint used as the binarization threshold
printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid);
/*compare each intensity value with the threshold and binarize*/
for(i=0; i<width*heigth; i++){
if(lumi_intensity[i]<mid){
img[i]=0;
}
if(lumi_intensity[i]>mid){
img[i]=255;
}
}
/*open the output file for binary (b) writing (w)*/
fp1=fopen("root-gpu.bmp","wb");
/*write the headers, palette, and pixel data: address, element size, count, file pointer*/
fwrite(&bmpFh, sizeof(bmpFh), 1, fp1); //the fields could also be written one by one, e.g. (&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp);
fwrite(&bmpIh, sizeof(bmpIh), 1, fp1);
fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp1);
fwrite(img, sizeof(unsigned char), width*heigth, fp1); //write the bmp pixel data
printf("'root-gpu.bmp' was saved.\n\n");
fclose(fp1);
return 0;
}
|
c274fbf89e100898fd51dc3277d90983e45e0a62.cu
|
#include <stdio.h>
#include <math.h>
#include <stdint.h> //uint32_tは符号なしintで4バイトに指定
#include <stdlib.h> //記憶域管理を使うため
#include <cuda.h>
/*記号定数として横幅と縦幅を定義*/
#define width 1024
#define heigth 1024
/*bmpの構造体*/
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER{ //構造体BITMAPFILEHEADERはファイルの先頭に来るもので,サイズは14 byte
unsigned short bfType; //bfTypeは,bmp形式であることを示すため,"BM"が入る
uint32_t bfSize; //bfsizeは,ファイル全体のバイト数
unsigned short bfReserved1; //bfReserved1と2は予約領域で,0になる
unsigned short bfReserved2;
uint32_t bf0ffBits; //bf0ffBitsは先頭から画素データまでのバイト数
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER{ //BITMAPINFOHEADERはbmpファイルの画像の情報の構造体で,サイズは40 byte
uint32_t biSize; //画像のサイズ
uint32_t biWidth; //横の画素数
uint32_t biHeight; //縦の画素数
unsigned short biPlanes; //1
unsigned short biBitCount; //一画素あたりの色の数のbit数.今回は8
uint32_t biCompression; //圧縮タイプを表す.bmpは非圧縮なので0
uint32_t biSizeImage; //bmp配列のサイズを表す.biCompression=0なら基本的に0
uint32_t biXPelsPerMeter; //biXPelsPerMeterとbiYPelsPerMeterは基本的に0
uint32_t biYPelsPerMeter;
uint32_t biCirUsed; //0
uint32_t biCirImportant; //0
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
/*cghの計算式のカーネル関数*/
__global__ void func_cgh_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d){
int i, j, k;
float xx, yy, zz, rr;
j=blockDim.x*blockIdx.x+threadIdx.x; //widthのループの置き換え
i=blockDim.y*blockIdx.y+threadIdx.y; //heigthのループの置き換え
//計算に必要な変数の定義
float interval=10.5F; //画素間隔
float wave_len=0.633F; //光波長
float wave_num=2.0*M_PI/wave_len; //波数
float cnst=interval*wave_num;
for(k=0; k<284; k++){
xx=((float)j-x_d[k])*((float)j-x_d[k]);
yy=((float)i-y_d[k])*((float)i-y_d[k]);
zz=z_d[k]*z_d[k];
rr=sqrt(xx+yy+zz);
lumi_intensity_d[i*width+j]=lumi_intensity_d[i*width+j]+cosf(cnst*rr);
}
}
/*画像生成用の配列*/
float lumi_intensity[width*heigth]; //光強度用の配列
unsigned char img[width*heigth]; //bmp用の配列
/*main関数*/
int main(){
BITMAPFILEHEADER bmpFh;
BITMAPINFOHEADER bmpIh;
RGBQUAD rgbQ[256];
/*ホスト側の変数*/
int i, j;
int points; //物体点
float min, max, mid; //2値化に用いる
FILE *fp, *fp1;
/*BITMAPFILEHEADERの構造体*/
bmpFh.bfType =19778; //'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778
bmpFh.bfSize =14+40+1024+(width*heigth); //1024はカラーパレットのサイズ.256階調で4 byte一組
bmpFh.bfReserved1 =0;
bmpFh.bfReserved2 =0;
bmpFh.bf0ffBits =14+40+1024;
/*BITMAPINFOHEADERの構造体*/
bmpIh.biSize =40;
bmpIh.biWidth =width;
bmpIh.biHeight =heigth;
bmpIh.biPlanes =1;
bmpIh.biBitCount =8;
bmpIh.biCompression =0;
bmpIh.biSizeImage =0;
bmpIh.biXPelsPerMeter =0;
bmpIh.biYPelsPerMeter =0;
bmpIh.biCirUsed =0;
bmpIh.biCirImportant =0;
/*RGBQUADの構造体*/
for(i=0; i<256; i++){
rgbQ[i].rgbBlue =i;
rgbQ[i].rgbGreen =i;
rgbQ[i].rgbRed =i;
rgbQ[i].rgbReserved =0;
}
/*3Dファイルの読み込み*/
fp=fopen("cube284.3d","rb"); //バイナリで読み込み
fread(&points, sizeof(int), 1, fp); //データのアドレス,サイズ,個数,ファイルポインタを指定
printf("the number of points is %d\n", points);
//取り出した物体点を入れる配列
int x[points]; //~~データを読み込むことで初めてこの配列が定義できる~~
int y[points];
float z[points];
int x_buf, y_buf, z_buf; //データを一時的に溜めておくための変数
/*各バッファに物体点座標を取り込み,ホログラム面と物体点の位置を考慮したデータを各配列に入れる*/
for(i=0; i<points; i++){
fread(&x_buf, sizeof(int), 1, fp);
fread(&y_buf, sizeof(int), 1, fp);
fread(&z_buf, sizeof(int), 1, fp);
x[i]=x_buf*40+width*0.5; //物体点を離すために物体点座標に40を掛け,中心の座標を足す
y[i]=y_buf*40+heigth*0.5;
z[i]=((float)z_buf)*40+100000.0F;
}
fclose(fp);
/*デバイス側の変数*/
int *x_d, *y_d;
float *z_d;
float *lumi_intensity_d;
dim3 block(32,32,1); //ブロックサイズ(スレッド数)の配置
dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); //グリッドサイズ(ブロック数)の配置
// dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1);
/*デバイス側のメモリ確保*/
cudaMalloc((void**)&x_d, points*sizeof(int));
cudaMalloc((void**)&y_d, points*sizeof(int));
cudaMalloc((void**)&z_d, points*sizeof(float));
cudaMalloc((void**)&lumi_intensity_d, width*heigth*sizeof(float));
/*ホスト側からデバイス側へデータ転送*/
cudaMemcpy(x_d, x, points*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(y_d, y, points*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(z_d, z, points*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(lumi_intensity_d, lumi_intensity, width*heigth*sizeof(float), cudaMemcpyHostToDevice);
/*カーネル関数の起動*/
func_cgh_gpu<<< grid, block >>>(x_d, y_d, z_d, lumi_intensity_d);
/*デバイス側からホスト側へデータ転送*/
cudaMemcpy(lumi_intensity, lumi_intensity_d, width*heigth*sizeof(float), cudaMemcpyDeviceToHost);
/*デバイスのメモリ解放*/
cudaFree(x_d);
cudaFree(y_d);
cudaFree(z_d);
cudaFree(lumi_intensity_d);
//最大・最小値用の変数を比較できるようにとりあえずlumi_intensity[0]を入れる
min=lumi_intensity[0];
max=lumi_intensity[0];
/*最大値,最小値を求める*/
for(i=0; i<heigth; i++){
for(j=0; j<width; j++){
if(min>lumi_intensity[i*width+j]){
min=lumi_intensity[i*width+j];
}
if(max<lumi_intensity[i*width+j]){
max=lumi_intensity[i*width+j];
}
}
}
mid=(min+max)*0.5F; //中間値(閾値)を求める
printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid);
/*各々の光強度配列の値を中間値と比較し,2値化する*/
for(i=0; i<width*heigth; i++){
if(lumi_intensity[i]<mid){
img[i]=0;
}
if(lumi_intensity[i]>mid){
img[i]=255;
}
}
/*宣言したfpと使用するファイル名,その読み書きモードを設定.バイナリ(b)で書き込み(w)*/
fp1=fopen("root-gpu.bmp","wb");
/*書き込むデータのアドレス,データのサイズ,データの個数,ファイルのポインタを指定*/
fwrite(&bmpFh, sizeof(bmpFh), 1, fp1); //(&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp);というように個別に書くことも可能
fwrite(&bmpIh, sizeof(bmpIh), 1, fp1);
fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp1);
fwrite(img, sizeof(unsigned char), width*heigth, fp1); //bmpに書き込み
printf("'root-gpu.bmp' was saved.\n\n");
fclose(fp1);
return 0;
}
|
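Both versions above size the grid with ceil(width/block.x) and ceil(heigth/block.y). Because the operands are integers, the division truncates before ceil is applied; with width and heigth equal to 1024 and a 32x32 block this happens to be exact, but for sizes that are not multiples of the block dimension it silently drops the last partial block, which is why the commented-out (width+block.x-1)/block.x form is the safer one. A minimal host-only sketch of the difference (the value 1000 is just an illustration):

#include <cstdio>
#include <cmath>

int main() {
    unsigned int width = 1000, bx = 32;            // 1000 is not a multiple of 32
    int truncated = (int)ceil(width / bx);         // 1000/32 == 31 in integer math, ceil(31) == 31
    int robust    = (int)((width + bx - 1) / bx);  // 32 blocks: covers all 1000 columns
    printf("ceil(width/bx) = %d blocks, (width+bx-1)/bx = %d blocks\n", truncated, robust);
    return 0;
}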
b021267350dfc7fea495b3620e5c583c81877aa6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Maryam Dehnavi, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2019 Bogdan Simion and Maryam Dehnavi
* -------------
*/
#include "kernels.h"
__global__ void dot_kernel2(float *g_idata1, float *g_idata2, float *g_odata)
{
}
|
b021267350dfc7fea495b3620e5c583c81877aa6.cu
|
/* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Maryam Dehnavi, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2019 Bogdan Simion and Maryam Dehnavi
* -------------
*/
#include "kernels.h"
__global__ void dot_kernel2(float *g_idata1, float *g_idata2, float *g_odata)
{
}
|
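dot_kernel2 above is left as an empty stub, apparently as part of the course exercise. One common way such a kernel is written, shown here only as a sketch and not as the intended solution, is a grid-stride partial product followed by a shared-memory tree reduction and one atomic add per block; the extra length parameter n and the power-of-two block size are assumptions that the stub's signature does not specify.

#include <cuda_runtime.h>

// Sketch only: each block reduces its partial dot product in shared memory,
// then thread 0 atomically adds the block result into *g_odata.
__global__ void dot_kernel2_sketch(const float *g_idata1, const float *g_idata2,
                                   float *g_odata, int n)
{
    extern __shared__ float cache[];              // blockDim.x floats, supplied at launch
    int tid = threadIdx.x;
    float sum = 0.0f;
    for (int i = blockIdx.x * blockDim.x + tid; i < n; i += gridDim.x * blockDim.x)
        sum += g_idata1[i] * g_idata2[i];         // grid-stride partial product
    cache[tid] = sum;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // assumes blockDim.x is a power of two
        if (tid < s) cache[tid] += cache[tid + s];
        __syncthreads();
    }
    if (tid == 0) atomicAdd(g_odata, cache[0]);   // *g_odata must be zeroed before launch
}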
0b9d98a93c59fc0e9dc94aaeb4c77dbda56f0135.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#define HUEFILTERHELPER_CUDACOMPILE
#include "HueFilterHelper.h"
struct HueFilterHelper_CopyInfo_c
{
int _anTargetPitch[6];
int _anInputPitch[6];
int _anSize[6];
};
__constant__ HueFilterHelper_CopyInfo_c
cKernelCopyInfo;
template <class TYPE>
__global__ void
HueFilterHelper_CopyAreaCUDAKernel(TYPE *pTarget, const TYPE *pInput, int nYPitch)
{
int
anPos[4];
anPos[0] = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
anPos[2] = blockIdx.y / nYPitch;
int nBlockIdY = blockIdx.y - (anPos[2] * nYPitch);
anPos[1] = __mul24(nBlockIdY,blockDim.y) + threadIdx.y;
//Only if inside buffer
if (anPos[0] < cKernelCopyInfo._anSize[0] &&
anPos[1] < cKernelCopyInfo._anSize[1] &&
anPos[2] < cKernelCopyInfo._anSize[2])
{
int
nReadOffset = anPos[0] * cKernelCopyInfo._anInputPitch[0] +
anPos[1] * cKernelCopyInfo._anInputPitch[1] +
anPos[2] * cKernelCopyInfo._anInputPitch[2];
int
nWriteOffset = anPos[0] * cKernelCopyInfo._anTargetPitch[0] +
anPos[1] * cKernelCopyInfo._anTargetPitch[1] +
anPos[2] * cKernelCopyInfo._anTargetPitch[2];
pTarget[nWriteOffset] = pInput[nReadOffset];
}
}
extern "C" void
HueFilterHelper_CudaCopyFromCPU(void *pxCudaDst, void *pxCPUSrc, int nSize)
{
hipMemcpy(pxCudaDst, pxCPUSrc, nSize, hipMemcpyHostToDevice);
}
extern "C" void *
HueFilterHelper_GetPointer(HueFilterHelper * pcFilterHelper, HueFilterHelper_Type_e eOutput, int iOutput)
{
assert(iOutput < HUEFILTERHELPER_INPUT_MAX);
if (eOutput == HUEFILTERHELPER_OUTPUT) return pcFilterHelper->_apxOutputData[iOutput];
else if (eOutput == HUEFILTERHELPER_TEMP) return pcFilterHelper->_apxTempData[iOutput];
else if (eOutput == HUEFILTERHELPER_INPUT) return pcFilterHelper->_apxInputData[iOutput];
else assert(0);
return NULL;
}
extern "C" void
HueFilterHelper_GetPitch(int *pnOutputPitch, int *pnOutputBitPitch, HueFilterHelper * pcFilterHelper, HueFilterHelper_Type_e eOutput, int iOutput)
{
assert(iOutput < HUEFILTERHELPER_INPUT_MAX);
int *pnPitch = NULL,
*pnBitPitch = NULL;
if (eOutput == HUEFILTERHELPER_OUTPUT)
{
pnPitch = pcFilterHelper->_aanOutputPitch[iOutput];
pnBitPitch = pcFilterHelper->_aanOutputBitPitch[iOutput];
}
else if (eOutput == HUEFILTERHELPER_TEMP)
{
pnPitch = pcFilterHelper->_aanTempPitch[iOutput];
pnBitPitch = pcFilterHelper->_aanTempBitPitch[iOutput];
}
else if (eOutput == HUEFILTERHELPER_INPUT)
{
pnPitch = pcFilterHelper->_aanInputPitch[iOutput];
pnBitPitch = pcFilterHelper->_aanInputBitPitch[iOutput];
}
else assert(0);
for (int i = 0; i < HUEFILTERHELPER_DIMENSIONALITY_MAX; i++)
{
pnOutputPitch[i] = pnPitch[i];
pnOutputBitPitch[i] = pnBitPitch[i];
}
}
extern "C" void
HueFilterHelper_GetArea(int *pnOutputMin, int *pnOutputMax, HueFilterHelper * pcFilterHelper, HueFilterHelper_Type_e eOutput, int iOutput)
{
int
*pnMin = NULL,
*pnMax = NULL;
if (eOutput == HUEFILTERHELPER_OUTPUT)
{
pnMin = pcFilterHelper->_aanOutputMin[iOutput];
pnMax = pcFilterHelper->_aanOutputMax[iOutput];
}
else if (eOutput == HUEFILTERHELPER_TEMP)
{
pnMin = pcFilterHelper->_aanTempMin[iOutput];
pnMax = pcFilterHelper->_aanTempMax[iOutput];
}
else if (eOutput == HUEFILTERHELPER_INPUT)
{
pnMin = pcFilterHelper->_aanInputMin[iOutput];
pnMax = pcFilterHelper->_aanInputMax[iOutput];
}
else assert(0);
for (int i = 0; i < HUEFILTERHELPER_DIMENSIONALITY_MAX; i++)
{
pnOutputMin[i] = pnMin[i];
pnOutputMax[i] = pnMax[i];
}
}
extern "C" hipChannelFormatDesc
HueFilterHelper_GetChannelDesc(HueFilterHelper_ProcessArea * pProcessArea, HueFilterHelper_Type_e eInput, int iInput)
{
hipChannelFormatDesc
cChannelDesc;
HueFilterHelper_DataFormat_c
cFormat;
if (eInput == HUEFILTERHELPER_OUTPUT)
{
cFormat = pProcessArea->_pcFilterHelper->_acOutputDataFormat[iInput];
}
else if (eInput == HUEFILTERHELPER_TEMP)
{
cFormat = pProcessArea->_pcFilterHelper->_acTempDataFormat[iInput];
}
else if (eInput == HUEFILTERHELPER_INPUT)
{
cFormat = pProcessArea->_pcFilterHelper->_acInputDataFormat[iInput];
}
int
nSize0 = 32;
if (cFormat._eFormat == HUEFILTERHELPER_FORMAT_1BIT ||
cFormat._eFormat == HUEFILTERHELPER_FORMAT_BYTE) nSize0 = 8;
else if (cFormat._eFormat == HUEFILTERHELPER_FORMAT_WORD) nSize0 = 16;
int
nSize1 = 0,
nSize2 = 0,
nSize3 = 0;
if (cFormat._eComponents == HUEFILTERHELPER_COMPONENTS_2)
{
nSize1 = nSize0;
}
else if (cFormat._eComponents == HUEFILTERHELPER_COMPONENTS_4)
{
nSize1 = nSize0;
nSize2 = nSize0;
nSize3 = nSize0;
}
if (cFormat._eFormat == HUEFILTERHELPER_FORMAT_FLOAT)
{
cChannelDesc = hipCreateChannelDesc(nSize0, nSize1, nSize2, nSize3, hipChannelFormatKindFloat);
}
else
{
cChannelDesc = hipCreateChannelDesc(nSize0, nSize1, nSize2, nSize3, hipChannelFormatKindUnsigned);
}
return cChannelDesc;
}
void
HueFilterHelper_CudaCopyArea(void *pTarget, const int *pnTargetPitch, const void * pInput, const int *pnInputPitch, const int *pnSize, int nElementSize)
{
HueFilterHelper_CopyInfo_c
cCopyInfo;
for(int iDimension = 0; iDimension < 6; iDimension++)
{
cCopyInfo._anTargetPitch[iDimension] = pnTargetPitch[iDimension];
cCopyInfo._anInputPitch[iDimension] = pnInputPitch[iDimension];
cCopyInfo._anSize[iDimension] = pnSize[iDimension];
}
hipMemcpyToSymbol(cKernelCopyInfo, &cCopyInfo, sizeof(HueFilterHelper_CopyInfo_c));
dim3
cGridSize,
cBlockSize;
cBlockSize.x = 16;
cBlockSize.y = 16;
cBlockSize.z = 1;
cGridSize.x = pnSize[0];
cGridSize.y = pnSize[1];
cGridSize.z = 1;
cGridSize.x = (cGridSize.x + cBlockSize.x - 1) / cBlockSize.x;
cGridSize.y = (cGridSize.y + cBlockSize.y - 1) / cBlockSize.y;
cGridSize.z = 1;
int nYPitch = cGridSize.y;
cGridSize.y *= pnSize[2];
switch(nElementSize)
{
case 1:
hipLaunchKernelGGL(( HueFilterHelper_CopyAreaCUDAKernel), dim3(cGridSize), dim3(cBlockSize), 0, 0, (unsigned char *)pTarget, (const unsigned char *)pInput, nYPitch);
break;
case 2:
hipLaunchKernelGGL(( HueFilterHelper_CopyAreaCUDAKernel), dim3(cGridSize), dim3(cBlockSize), 0, 0, (unsigned short *)pTarget, (const unsigned short *)pInput, nYPitch);
break;
case 4:
hipLaunchKernelGGL(( HueFilterHelper_CopyAreaCUDAKernel), dim3(cGridSize), dim3(cBlockSize), 0, 0, (float *)pTarget, (const float *)pInput, nYPitch);
break;
}
}
extern "C" void
HueFilterHelper_CudaCopyAreaU8(unsigned char *pTarget, const int *pnTargetPitch, const unsigned char * pInput, const int *pnInputPitch, const int *pnSize)
{
HueFilterHelper_CudaCopyArea(pTarget, pnTargetPitch, pInput, pnInputPitch, pnSize, sizeof(char));
}
extern "C" void
HueFilterHelper_CudaCopyAreaU16(unsigned short *pTarget, const int *pnTargetPitch, const unsigned short * pInput, const int *pnInputPitch, const int *pnSize)
{
HueFilterHelper_CudaCopyArea(pTarget, pnTargetPitch, pInput, pnInputPitch, pnSize, sizeof(short));
}
extern "C" void
HueFilterHelper_CudaCopyAreaR32(float *pTarget, const int *pnTargetPitch, const float * pInput, const int *pnInputPitch, const int *pnSize)
{
HueFilterHelper_CudaCopyArea(pTarget, pnTargetPitch, pInput, pnInputPitch, pnSize, sizeof(float));
}
|
0b9d98a93c59fc0e9dc94aaeb4c77dbda56f0135.cu
|
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#define HUEFILTERHELPER_CUDACOMPILE
#include "HueFilterHelper.h"
struct HueFilterHelper_CopyInfo_c
{
int _anTargetPitch[6];
int _anInputPitch[6];
int _anSize[6];
};
__constant__ HueFilterHelper_CopyInfo_c
cKernelCopyInfo;
template <class TYPE>
__global__ void
HueFilterHelper_CopyAreaCUDAKernel(TYPE *pTarget, const TYPE *pInput, int nYPitch)
{
int
anPos[4];
anPos[0] = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
anPos[2] = blockIdx.y / nYPitch;
int nBlockIdY = blockIdx.y - (anPos[2] * nYPitch);
anPos[1] = __mul24(nBlockIdY,blockDim.y) + threadIdx.y;
//Only if inside buffer
if (anPos[0] < cKernelCopyInfo._anSize[0] &&
anPos[1] < cKernelCopyInfo._anSize[1] &&
anPos[2] < cKernelCopyInfo._anSize[2])
{
int
nReadOffset = anPos[0] * cKernelCopyInfo._anInputPitch[0] +
anPos[1] * cKernelCopyInfo._anInputPitch[1] +
anPos[2] * cKernelCopyInfo._anInputPitch[2];
int
nWriteOffset = anPos[0] * cKernelCopyInfo._anTargetPitch[0] +
anPos[1] * cKernelCopyInfo._anTargetPitch[1] +
anPos[2] * cKernelCopyInfo._anTargetPitch[2];
pTarget[nWriteOffset] = pInput[nReadOffset];
}
}
extern "C" void
HueFilterHelper_CudaCopyFromCPU(void *pxCudaDst, void *pxCPUSrc, int nSize)
{
cudaMemcpy(pxCudaDst, pxCPUSrc, nSize, cudaMemcpyHostToDevice);
}
extern "C" void *
HueFilterHelper_GetPointer(HueFilterHelper * pcFilterHelper, HueFilterHelper_Type_e eOutput, int iOutput)
{
assert(iOutput < HUEFILTERHELPER_INPUT_MAX);
if (eOutput == HUEFILTERHELPER_OUTPUT) return pcFilterHelper->_apxOutputData[iOutput];
else if (eOutput == HUEFILTERHELPER_TEMP) return pcFilterHelper->_apxTempData[iOutput];
else if (eOutput == HUEFILTERHELPER_INPUT) return pcFilterHelper->_apxInputData[iOutput];
else assert(0);
return NULL;
}
extern "C" void
HueFilterHelper_GetPitch(int *pnOutputPitch, int *pnOutputBitPitch, HueFilterHelper * pcFilterHelper, HueFilterHelper_Type_e eOutput, int iOutput)
{
assert(iOutput < HUEFILTERHELPER_INPUT_MAX);
int *pnPitch = NULL,
*pnBitPitch = NULL;
if (eOutput == HUEFILTERHELPER_OUTPUT)
{
pnPitch = pcFilterHelper->_aanOutputPitch[iOutput];
pnBitPitch = pcFilterHelper->_aanOutputBitPitch[iOutput];
}
else if (eOutput == HUEFILTERHELPER_TEMP)
{
pnPitch = pcFilterHelper->_aanTempPitch[iOutput];
pnBitPitch = pcFilterHelper->_aanTempBitPitch[iOutput];
}
else if (eOutput == HUEFILTERHELPER_INPUT)
{
pnPitch = pcFilterHelper->_aanInputPitch[iOutput];
pnBitPitch = pcFilterHelper->_aanInputBitPitch[iOutput];
}
else assert(0);
for (int i = 0; i < HUEFILTERHELPER_DIMENSIONALITY_MAX; i++)
{
pnOutputPitch[i] = pnPitch[i];
pnOutputBitPitch[i] = pnBitPitch[i];
}
}
extern "C" void
HueFilterHelper_GetArea(int *pnOutputMin, int *pnOutputMax, HueFilterHelper * pcFilterHelper, HueFilterHelper_Type_e eOutput, int iOutput)
{
int
*pnMin = NULL,
*pnMax = NULL;
if (eOutput == HUEFILTERHELPER_OUTPUT)
{
pnMin = pcFilterHelper->_aanOutputMin[iOutput];
pnMax = pcFilterHelper->_aanOutputMax[iOutput];
}
else if (eOutput == HUEFILTERHELPER_TEMP)
{
pnMin = pcFilterHelper->_aanTempMin[iOutput];
pnMax = pcFilterHelper->_aanTempMax[iOutput];
}
else if (eOutput == HUEFILTERHELPER_INPUT)
{
pnMin = pcFilterHelper->_aanInputMin[iOutput];
pnMax = pcFilterHelper->_aanInputMax[iOutput];
}
else assert(0);
for (int i = 0; i < HUEFILTERHELPER_DIMENSIONALITY_MAX; i++)
{
pnOutputMin[i] = pnMin[i];
pnOutputMax[i] = pnMax[i];
}
}
extern "C" cudaChannelFormatDesc
HueFilterHelper_GetChannelDesc(HueFilterHelper_ProcessArea * pProcessArea, HueFilterHelper_Type_e eInput, int iInput)
{
cudaChannelFormatDesc
cChannelDesc;
HueFilterHelper_DataFormat_c
cFormat;
if (eInput == HUEFILTERHELPER_OUTPUT)
{
cFormat = pProcessArea->_pcFilterHelper->_acOutputDataFormat[iInput];
}
else if (eInput == HUEFILTERHELPER_TEMP)
{
cFormat = pProcessArea->_pcFilterHelper->_acTempDataFormat[iInput];
}
else if (eInput == HUEFILTERHELPER_INPUT)
{
cFormat = pProcessArea->_pcFilterHelper->_acInputDataFormat[iInput];
}
int
nSize0 = 32;
if (cFormat._eFormat == HUEFILTERHELPER_FORMAT_1BIT ||
cFormat._eFormat == HUEFILTERHELPER_FORMAT_BYTE) nSize0 = 8;
else if (cFormat._eFormat == HUEFILTERHELPER_FORMAT_WORD) nSize0 = 16;
int
nSize1 = 0,
nSize2 = 0,
nSize3 = 0;
if (cFormat._eComponents == HUEFILTERHELPER_COMPONENTS_2)
{
nSize1 = nSize0;
}
else if (cFormat._eComponents == HUEFILTERHELPER_COMPONENTS_4)
{
nSize1 = nSize0;
nSize2 = nSize0;
nSize3 = nSize0;
}
if (cFormat._eFormat == HUEFILTERHELPER_FORMAT_FLOAT)
{
cChannelDesc = cudaCreateChannelDesc(nSize0, nSize1, nSize2, nSize3, cudaChannelFormatKindFloat);
}
else
{
cChannelDesc = cudaCreateChannelDesc(nSize0, nSize1, nSize2, nSize3, cudaChannelFormatKindUnsigned);
}
return cChannelDesc;
}
void
HueFilterHelper_CudaCopyArea(void *pTarget, const int *pnTargetPitch, const void * pInput, const int *pnInputPitch, const int *pnSize, int nElementSize)
{
HueFilterHelper_CopyInfo_c
cCopyInfo;
for(int iDimension = 0; iDimension < 6; iDimension++)
{
cCopyInfo._anTargetPitch[iDimension] = pnTargetPitch[iDimension];
cCopyInfo._anInputPitch[iDimension] = pnInputPitch[iDimension];
cCopyInfo._anSize[iDimension] = pnSize[iDimension];
}
cudaMemcpyToSymbol(cKernelCopyInfo, &cCopyInfo, sizeof(HueFilterHelper_CopyInfo_c));
dim3
cGridSize,
cBlockSize;
cBlockSize.x = 16;
cBlockSize.y = 16;
cBlockSize.z = 1;
cGridSize.x = pnSize[0];
cGridSize.y = pnSize[1];
cGridSize.z = 1;
cGridSize.x = (cGridSize.x + cBlockSize.x - 1) / cBlockSize.x;
cGridSize.y = (cGridSize.y + cBlockSize.y - 1) / cBlockSize.y;
cGridSize.z = 1;
int nYPitch = cGridSize.y;
cGridSize.y *= pnSize[2];
switch(nElementSize)
{
case 1:
HueFilterHelper_CopyAreaCUDAKernel<<<cGridSize, cBlockSize>>>((unsigned char *)pTarget, (const unsigned char *)pInput, nYPitch);
break;
case 2:
HueFilterHelper_CopyAreaCUDAKernel<<<cGridSize, cBlockSize>>>((unsigned short *)pTarget, (const unsigned short *)pInput, nYPitch);
break;
case 4:
HueFilterHelper_CopyAreaCUDAKernel<<<cGridSize, cBlockSize>>>((float *)pTarget, (const float *)pInput, nYPitch);
break;
}
}
extern "C" void
HueFilterHelper_CudaCopyAreaU8(unsigned char *pTarget, const int *pnTargetPitch, const unsigned char * pInput, const int *pnInputPitch, const int *pnSize)
{
HueFilterHelper_CudaCopyArea(pTarget, pnTargetPitch, pInput, pnInputPitch, pnSize, sizeof(char));
}
extern "C" void
HueFilterHelper_CudaCopyAreaU16(unsigned short *pTarget, const int *pnTargetPitch, const unsigned short * pInput, const int *pnInputPitch, const int *pnSize)
{
HueFilterHelper_CudaCopyArea(pTarget, pnTargetPitch, pInput, pnInputPitch, pnSize, sizeof(short));
}
extern "C" void
HueFilterHelper_CudaCopyAreaR32(float *pTarget, const int *pnTargetPitch, const float * pInput, const int *pnInputPitch, const int *pnSize)
{
HueFilterHelper_CudaCopyArea(pTarget, pnTargetPitch, pInput, pnInputPitch, pnSize, sizeof(float));
}
|
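HueFilterHelper_CudaCopyArea above receives six-entry pitch and size arrays, but only the first three dimensions drive the kernel's indexing and bounds checks. Below is a host-side sketch of how those arrays might be filled to copy a dense W x H x D volume between two device buffers of unsigned char; the wrapper name CopyDenseVolumeU8, the shared pitch array, and the zeroed trailing entries are illustrative assumptions.

extern "C" void HueFilterHelper_CudaCopyAreaU8(unsigned char *pTarget, const int *pnTargetPitch,
                                               const unsigned char *pInput, const int *pnInputPitch,
                                               const int *pnSize);

// Dense layout: element (x, y, z) sits at x*1 + y*W + z*W*H in both device buffers.
void CopyDenseVolumeU8(unsigned char *pDst, const unsigned char *pSrc, int W, int H, int D)
{
    int anPitch[6] = { 1, W, W * H, 0, 0, 0 };  // same pitch for source and target here
    int anSize[6]  = { W, H, D, 1, 1, 1 };      // dimensions beyond the third are not used by the kernel
    HueFilterHelper_CudaCopyAreaU8(pDst, anPitch, pSrc, anPitch, anSize);
}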
03a04df5f71e1ae979c3a9a4d1c5d8d5006e6a23.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define DEBUGPRINT 0
__global__ void compute_entropy_gpu_kernel(double *tlag, double *pr, double *vtrans,int ntot, int irho, double ntol , double rgam, double gmaref,int ltot ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
double rho= fmax(vtrans[ltot*(irho-1)+id],ntol);
tlag[id]=rgam*rho*log(pr[id]/(pow(rho,gmaref) ));
}
}
extern "C" void compute_entropy_gpu_wrapper_(int *glbblockSize1,double *d_tlag, double *d_pr, double *d_vtrans,int *ntot, int *irho, double *ntol , double *rgam, double *gmaref, int *ltot){
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code1 = hipPeekAtLastError();
// if (code1 != hipSuccess){
printf("CUDA: Start compute_entropy_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values ntot = %d, irho = %d, ntol = %lf, rgam = %lf, gmaref = %lf \n",ntot[0],irho[0],ntol[0],rgam[0],gmaref[0] );
#endif
//}
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot[0]/blockSize);
hipLaunchKernelGGL(( compute_entropy_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_tlag,d_pr,d_vtrans,ntot[0],irho[0],ntol[0],rgam[0],gmaref[0],ltot[0]);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code2 = hipPeekAtLastError();
//if (code2 != hipSuccess){
printf("CUDA: End compute_engropy_wrapper cuda status: %s\n",hipGetErrorString(code1));
#endif
//}
}
__global__ void entropy_residual_flux_gpu_kernel(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzd, double *vx, double *vy, double *vz, int if3d ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
if(stage==1){
res2[id]=tlag[id]-tlag[ltot*lorder+id] ;
}
else{
res2[id]=tlag[id]-tlag[ltot+id] ;
}
res2[id] = res2[id]*rdt;
// evaluate_entropy_flux(e)
totalh[id]= vx[id]*tlag[id];
totalh[lxyzd+id] = vy[id]*tlag[id];
if(if3d){totalh[lxyzd*2+id] = vz[id]*tlag[id];}
//flux_div_mini(e)
}
}
__global__ void flux_div_mini_gpu_kernel(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzd, double *ur1,double *us1, double *ut1, double *ur2, double *us2, double *ut2, double *ur3, double *us3, double *ut3,double *ud, int ldd, double *jacmi, double *rxm1, double *sxm1, double *txm1, double *rym1, double *sym1, double *tym1,double *rzm1, double *szm1, double *tzm1, int if3d ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
int i= id % ldd;
if(id<ntot){
//something is wrong because ur us ut has only [i]. I think it should be [id] because I added *lelt later. Check again. adeesha
if(if3d){
ud[id] = jacmi[id] *( rxm1[id]*ur1[i]+ sxm1[id]*us1[i]+txm1[id]*ut1[i]);
ud[id] = ud[id]+ jacmi[id] *( rym1[id]*ur2[i]+ sym1[id]*us2[i]+txm1[id]*ut2[i]);
ud[id] = ud[id] + jacmi[id] *( rzm1[id]*ur3[i]+ szm1[id]*us3[i]+tzm1[id]*ut3[i]);
}
else{
ud[id] = jacmi[id] *( rxm1[id]*ur1[i]+ sxm1[id]*us1[i]);
ud[id] = ud[id]+ jacmi[id] *( rym1[id]*ur2[i]+ sym1[id]*us2[i]);
}
//add 2
res2[id] = res2[id] + ud[id];
}
}
//mxm multiplication
__global__ void mxm1(double *a, int n1, double *b, int n2, double *c, int n3, int nelt, int aSize, int bSize, int cSize, int extraEq){
//calculate c(n1,n3) = a(n1,n2) X b(n2,n3) in c
//in fortran the original calculation was
// c(n3,n1) = b(n3,n2) X a(n2,n1)
// a,b,cSize are single element size
//extraEq, in case of a matrix has equation as an index
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nelt*n1*n3){
int e = id/(n1*n3);
int rc = id%(n1*n3);
int i = rc/n3;
int j = rc%n3;
int cid = e*cSize + rc;
int aid = e*aSize + extraEq + i*n2;
int bid = e*bSize + j;
c[cid] = 0;
for(int k = 0; k<n2; k++)
c[cid]+=a[aid+k]*b[bid+k*n3];
}
}
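//Illustrative sketch (for reference only, not called anywhere in this file): a plain host-side
//version of the same per-element product, useful for checking the C-ordered indexing documented
//above. The name mxm1_host_reference is a placeholder introduced here for illustration.
static void mxm1_host_reference(const double *a, int n1, const double *b, int n2,
                                double *c, int n3, int nelt,
                                int aSize, int bSize, int cSize, int extraEq){
    for(int e = 0; e < nelt; e++)
        for(int i = 0; i < n1; i++)
            for(int j = 0; j < n3; j++){
                double sum = 0.0;
                for(int k = 0; k < n2; k++)
                    sum += a[e*aSize + extraEq + i*n2 + k] * b[e*bSize + k*n3 + j];
                c[e*cSize + i*n3 + j] = sum; //same element c(i,j) that one kernel thread writes
            }
}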
extern "C" void entropy_residual_gpu_wrapper_(int *glbblockSize1,double *d_tlag, double *d_res2,int *ntot, double *rdt, int *stage, int *lorder,int *ltot, int *lxd, int *lyd, int *lzd, double *d_vx, double *d_vy, double *d_vz, int *lx1, int *ly1, int *lz1, double *d_jacmi, double *d_rxm1, double *d_sxm1, double *d_txm1, double *d_rym1, double *d_sym1, double *d_tym1,double *d_rzm1, double *d_szm1, double *d_tzm1,int *if3d,int *nelt, double *d_dxm1, double *d_dxtm1 ){
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code1 = hipPeekAtLastError();
// if (code1 != hipSuccess){
printf("CUDA: Start entropy_residual_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start entropy_residual_gpu_wrapper values rdt = %lf, stage = %d, lorder= %d,ltot = %d,lxd = %d, lyd = %d, lzd = %d, lx1 = %d,ly1 = %d,lz1 = %d,if3d = %d,nelt = %d \n",rdt[0], stage[0],lorder[0],ltot[0],lxd[0], lyd[0],lzd[0],lx1[0],ly1[0],lz1[0],if3d[0],nelt[0]);
#endif
//}
double *d_totalh_temp; // Anyway d_totalh seems not needed. check with Dr.Tania. adeesha
double *d_ur1;
double *d_us1;
double *d_ut1;
double *d_ur2;
double *d_us2;
double *d_ut2;
double *d_ur3;
double *d_us3;
double *d_ut3;
double *d_ud;
int lxyzd = lxd[0]*lyd[0]*lzd[0];
int ldd = lx1[0]*ly1[0]*lz1[0];
hipMalloc((void**)&d_totalh_temp,3*lxyzd *nelt[0]* sizeof(double));
hipMalloc((void**)&d_ur1,ldd * nelt[0]*sizeof(double)); //nelt[0] added later. need to double check.
hipMalloc((void**)&d_us1,ldd * nelt[0]*sizeof(double));
hipMalloc((void**)&d_ut1,ldd * nelt[0]*sizeof(double));
hipMalloc((void**)&d_ur2,ldd * nelt[0]*sizeof(double));
hipMalloc((void**)&d_us2,ldd * nelt[0]*sizeof(double));
hipMalloc((void**)&d_ut2,ldd * nelt[0]*sizeof(double));
hipMalloc((void**)&d_ur3,ldd * nelt[0]*sizeof(double));
hipMalloc((void**)&d_us3,ldd * nelt[0]*sizeof(double));
hipMalloc((void**)&d_ut3,ldd * nelt[0]*sizeof(double));
hipMalloc((void**)&d_ud,nelt[0]*ldd * sizeof(double));
hipMemset(d_totalh_temp, 0.0, 3*lxyzd*nelt[0]*sizeof(double));
hipMemset(d_ur1, 0.0, ldd*nelt[0]*sizeof(double));
hipMemset(d_us1, 0.0, ldd*nelt[0]*sizeof(double));
hipMemset(d_ut1, 0.0, ldd*nelt[0]*sizeof(double));
hipMemset(d_ur2, 0.0, ldd*nelt[0]*sizeof(double));
hipMemset(d_us2, 0.0, ldd*nelt[0]*sizeof(double));
hipMemset(d_ut2, 0.0, ldd*nelt[0]*sizeof(double));
hipMemset(d_ur3, 0.0, ldd*nelt[0]*sizeof(double));
hipMemset(d_us3, 0.0, ldd*nelt[0]*sizeof(double));
hipMemset(d_ut3, 0.0, ldd*nelt[0]*sizeof(double));
hipMemset(d_ud, 0.0, ldd*nelt[0]*sizeof(double));
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot[0]/blockSize);
hipLaunchKernelGGL(( entropy_residual_flux_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh_temp, lxyzd, d_vx, d_vy, d_vz, if3d[0]);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
code1 = hipPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after kernel 1cuda status: %s\n",hipGetErrorString(code1));
#endif
int mdm1 = lx1[0]-1;
// Following is the local_grad3 function
int nx_2 = lx1[0]*lx1[0];
int nx_3 = nx_2*lx1[0];
int nxd_2 = nx_2;
int nxd_3 = nx_3;
//ur(nx,nx*nx) = D(nx,nx) * u(nx,nx*nx) fortran
//ur(nx*nx,nx) = u(nx*nx,nx) * D(nx,nx) C
if(if3d[0]){
blockSize=glbblockSize1[0], gridSize;
// for ur1 us1 and ut1
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_totalh_temp,nx_2, d_dxm1, lx1[0], d_ur1, lx1[0], nelt[0], nx_3, 0, nxd_3, 0);//ur,us, ut should be indexed by nxd
#ifdef DEBUGPRINT
hipDeviceSynchronize();
code1 = hipPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after 1st mxm 1cuda status: %s\n",hipGetErrorString(code1));
#endif
for(int k = 0; k<lx1[0]; k++){
//usk(nx,nx) = uk(nx,nx) * dt(nx,nx) fortran
//usk(nx,nx) = dt(nx,nx) * uk(nx,nx) C
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_dxtm1,lx1[0], d_totalh_temp+k*nx_2, lx1[0],d_us1+k*nx_2, lx1[0], nelt[0], 0, nx_3, nxd_3, 0);
}
#ifdef DEBUGPRINT
hipDeviceSynchronize();
code1 = hipPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after for loop mxm 1cuda status: %s\n",hipGetErrorString(code1));
#endif
//ut(nx_2,nx) = u(nx_2,nx) * dt(nx,nx) fortran
//ut(nx,nx_2) = dt(nx,nx) * u(nx,nx_2) C
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_dxtm1,lx1[0], d_totalh_temp, lx1[0], d_ut1, nx_2, nelt[0], 0, nx_3, nxd_3, 0);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
code1 = hipPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after 3rd mxm 1cuda status: %s\n",hipGetErrorString(code1));
#endif
// for ur2 us2 and ut2
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_totalh_temp+lxyzd*nelt[0],nx_2, d_dxm1, lx1[0], d_ur2, lx1[0], nelt[0], nx_3, 0, nxd_3, 0);//ur,us, ut should be indexed by nxd
for(int k = 0; k<lx1[0]; k++){
//usk(nx,nx) = uk(nx,nx) * dt(nx,nx) fortran
//usk(nx,nx) = dt(nx,nx) * uk(nx,nx) C
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_dxtm1,lx1[0], d_totalh_temp+lxyzd*nelt[0]+k*nx_2, lx1[0],d_us2+k*nx_2, lx1[0], nelt[0], 0, nx_3, nxd_3, 0);
}
//ut(nx_2,nx) = u(nx_2,nx) * dt(nx,nx) fortran
//ut(nx,nx_2) = dt(nx,nx) * u(nx,nx_2) C
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_dxtm1,lx1[0], d_totalh_temp+lxyzd*nelt[0], lx1[0], d_ut2, nx_2, nelt[0], 0, nx_3, nxd_3, 0);
// for ur3 us3 and ut3
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_totalh_temp+2*lxyzd*nelt[0],nx_2, d_dxm1, lx1[0], d_ur3, lx1[0], nelt[0], nx_3, 0, nxd_3, 0);//ur,us, ut should be indexed by nxd
for(int k = 0; k<lx1[0]; k++){
//usk(nx,nx) = uk(nx,nx) * dt(nx,nx) fortran
//usk(nx,nx) = dt(nx,nx) * uk(nx,nx) C
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_dxtm1,lx1[0], d_totalh_temp+2*lxyzd*nelt[0]+k*nx_2, lx1[0],d_us3+k*nx_2, lx1[0], nelt[0], 0, nx_3, nxd_3, 0);
}
//ut(nx_2,nx) = u(nx_2,nx) * dt(nx,nx) fortran
//ut(nx,nx_2) = dt(nx,nx) * u(nx,nx_2) C
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_dxtm1,lx1[0], d_totalh_temp+2*lxyzd*nelt[0], lx1[0], d_ut3, nx_2, nelt[0], 0, nx_3, nxd_3, 0);
}else{
// for ur1 us1 and ut1
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_totalh_temp,lx1[0], d_dxm1, lx1[0], d_ur1, lx1[0], nelt[0], nx_2, 0, nxd_2, 0);//ur,us, ut should be indexed by nxd
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_dxtm1,lx1[0], d_totalh_temp, lx1[0],d_us1, lx1[0], nelt[0], 0, nx_2, nxd_2, 0);
// for ur2 us2 and ut1
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_totalh_temp+lxyzd*nelt[0],lx1[0], d_dxm1, lx1[0], d_ur2, lx1[0], nelt[0], nx_2, 0, nxd_2, 0);//ur,us, ut should be indexed by nxd
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
hipLaunchKernelGGL(( mxm1), dim3(gridSize), dim3(blockSize), 0, 0, d_dxtm1,lx1[0], d_totalh_temp+lxyzd*nelt[0], lx1[0],d_us2, lx1[0], nelt[0], 0, nx_2, nxd_2, 0);
}
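//Note added for clarity: the mxm1 passes above form a per-element local gradient. With D = dxm1
//and its transpose dxtm1, ur is the derivative along the fastest index (Fortran ur = D*u), us is
//taken plane-by-plane (us_k = u_k*D^T), and in the 3D branch ut acts over the slowest index
//(ut = u*D^T); the same pattern is repeated for each component of d_totalh_temp.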
#ifdef DEBUGPRINT
hipDeviceSynchronize();
code1 = hipPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper before flux_div_mini_gpu_kernel cuda status: %s\n",hipGetErrorString(code1));
#endif
hipLaunchKernelGGL(( flux_div_mini_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh_temp, lxyzd, d_ur1,d_us1, d_ut1, d_ur2,d_us2, d_ut2, d_ur3, d_us3, d_ut3,d_ud, ldd, d_jacmi, d_rxm1, d_sxm1, d_txm1, d_rym1, d_sym1, d_tym1, d_rzm1, d_szm1, d_tzm1,if3d[0]);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
code1 = hipPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after flux_div_mini_gpu_kernel cuda status: %s\n",hipGetErrorString(code1));
#endif
hipFree(d_totalh_temp);
hipFree(d_ur1);
hipFree(d_ur2);
hipFree(d_ur3);
hipFree(d_us1);
hipFree(d_us2);
hipFree(d_us3);
hipFree(d_ut1);
hipFree(d_ut2);
hipFree(d_ut3);
hipFree(d_ud);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code2 = hipPeekAtLastError();
// if (code2 != hipSuccess){
printf("CUDA: End entropy residual_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2));
#endif
//}
}
__global__ void wavevisc_gpu_kernel(double *t,double *csound, double *vx, double *vy, double *vz, int ntot, double *wavespeed,int lxyz, int lx1, int ly1, int lz1, double *vtranstmp, double c_max,int ltot, double *meshh ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
wavespeed[id]= csound [id] +sqrtf(vx[id]*vx[id]+vy[id]*vy[id]+vz[id]*vz[id] ) ;
// find max of wavespeed using reduction
__syncthreads();
unsigned int i = lxyz/2;
int e= id/(lx1*ly1*lz1);
int startofcurrentelement = id-e;
while(i != 0){
if(id-startofcurrentelement < i){
wavespeed[id] = fmaxf(wavespeed[id], wavespeed[id + i]);
}
__syncthreads();
i /= 2;
}
double maxeig = wavespeed[id-e];
// find max of vtrans using reduction. But never used? check with Dr.Tania
//i = lxyz/2;
//int e= id/(lx1*ly1*lz1);
//int startofcurrentelement = id-e;
//while(i != 0){
// if(id-startofcurrentelement < i){
// vtranstmp[id] = fmaxf(vtranstmp[id], vtranstmp[id + i]);
// }
// __syncthreads();
// i /= 2;
//}
//int rhomax = vtranstmp[id-e];
t[2*ltot+id] = c_max*maxeig*meshh[e];
// if(id<10){
// printf("$$$ print from cuda maxeig = %lf t[2*ltot+id]= %lf meshh[e]=%lf \n",maxeig,t[2*ltot+id],meshh[e]);
// }
}
}
extern "C" void wavevisc_gpu_wrapper_(int *glbblockSize1,double *d_t, double *d_csound,double *d_vx, double *d_vy, double *d_vz, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_vtrans, double *c_max, double *d_meshh, int *irho ){
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code1 = hipPeekAtLastError();
// if (code1 != hipSuccess){
printf("CUDA: Start wavevisc_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt= %d,lelt= %d,lx1= %d,ly1= %d, lz1= %d,c_max= %lf,irho= %d \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],c_max[0],irho[0]);
#endif
// }
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
double *d_wavespeed;
hipMalloc((void**)&d_wavespeed,nelt[0]*lxyz* sizeof(double));
// hipMemset(d_totalh_temp, 0.0, 3*lxyzd*nelt*sizeof(double));
double *d_vtranstemp;
hipMalloc((void**)&d_vtranstemp,nelt[0]*lxyz* sizeof(double));
hipMemcpy(d_vtranstemp, &d_vtrans[(irho[0]-1)*lelt[0]*lxyz], nelt[0]*lxyz* sizeof(double), hipMemcpyDeviceToDevice);
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
hipLaunchKernelGGL(( wavevisc_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_t,d_csound, d_vx, d_vy, d_vz,ntot,d_wavespeed, lxyz,lx1[0],ly1[0],lz1[0],d_vtranstemp,c_max[0], ltot, d_meshh);
hipFree(d_wavespeed);
hipFree(d_vtranstemp);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code2 = hipPeekAtLastError();
// if (code2 != hipSuccess){
printf("CUDA: End Wavevisc_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2));
#endif
// }
}
__global__ void max_to_trilin_gpu_kernel(double *t,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy, double *xm1, double *ym1, double *zm1, int if3d ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int e= id/(lxyz);
double p000 = t[2*ltot+e*lxyz];
double p100 = t[2*ltot+e*lxyz+(lx1-1)];
double p010 = t[2*ltot+e*lxyz+(ly1-1)*lx1];
double p110 = t[2*ltot+e*lxyz+(ly1-1)*lx1+(lx1-1)];
double p001 = t[2*ltot+e*lxyz+(lz1-1)*lxy];
double p101 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(lx1-1)];
double p011 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
double p111 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+(lx1-1)];
double c1=p100-p000;
double c2=p010-p000;
double c3=p001-p000;
double c4=p110-p010-p100+p000;
double c5=p011-p001-p010+p000;
double c6=p101-p001-p100+p000;
double c7=p111-p011-p101-p110+p100+p001+p010-p000;
double rdx=1.0/(xm1[e*lxyz+(lx1-1)]-xm1[e*lxyz]); // cubes only!!!
double rdy=1.0/(ym1[e*lxyz+(ly1-1)*lx1]-ym1[e*lxyz]);
double rdz=0.0;
if(if3d){ rdz=1.0/(zm1[e*lxyz+(lz1-1)*lxy]-zm1[e*lxyz]); }
int firstlx = id%lxyz;
double deltax=rdx*(xm1[id]-xm1[e*lxyz]) ;//! cubes only!!!
double deltay=rdy*(ym1[id]-ym1[e*lxyz]);
double deltaz=0.0;
if (if3d){ deltaz=rdz*(zm1[id]-zm1[e*lxyz]);}
t[2*ltot+id] =p000+c1*deltax+c2*deltay+c3*deltaz+ c4*deltax*deltay+c5*deltay*deltaz+ c6*deltaz*deltax+c7*deltay*deltaz*deltax;
}
}
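//Reference note (added for clarity): the kernel above evaluates the standard trilinear form built
//from the eight element-corner values,
// p(dx,dy,dz) = p000 + c1*dx + c2*dy + c3*dz + c4*dx*dy + c5*dy*dz + c6*dz*dx + c7*dx*dy*dz,
//with dx,dy,dz in [0,1] the normalized offsets computed from xm1/ym1/zm1. As a sanity check, at the
//corner (dx,dy,dz)=(1,1,0) the expression reduces to p000+c1+c2+c4 = p110, as expected.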
extern "C" void max_to_trilin_gpu_wrapper_(int *glbblockSize1,double *d_t, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_xm1, double *d_ym1, double *d_zm1, int *if3d ){
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code1 = hipPeekAtLastError();
// if (code1 != hipSuccess){
printf("CUDA: Start max_to_trilin_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt=%d,lelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],if3d[0]);
#endif
//}
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
hipLaunchKernelGGL(( max_to_trilin_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_t,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], d_xm1, d_ym1, d_zm1, if3d[0]);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code2 = hipPeekAtLastError();
//if (code2 != hipSuccess){
printf("CUDA: End max_to_trilin_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2));
#endif
//}
}
__global__ void resvisc_gpu_kernel1(double *res2,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy,double *meshh ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int e= id/(lx1*ly1*lz1);
res2[id] = res2[id]*meshh[e]*meshh[e];
}
}
extern "C" void resvisc_gpu_wrapper1_(int *glbblockSize1,double *d_res2, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_meshh){
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code1 = hipPeekAtLastError();
//if (code1 != hipSuccess){
printf("CUDA: Start resvisc_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt= %d,lelt= %d,lx1= %d,ly1= %d,lz1 = %d,\n", nelt[0],lelt[0],lx1[0],ly1[0],lz1[0]);
#endif
//}
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
hipLaunchKernelGGL(( resvisc_gpu_kernel1), dim3(gridSize), dim3(blockSize), 0, 0, d_res2,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], d_meshh);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code2 = hipPeekAtLastError();
//if (code2 != hipSuccess){
printf("CUDA: End resvisc_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2));
#endif
//}
}
__global__ void resvisc_gpu_kernel2(double *res2,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy,double c_sub_e, double maxdiff ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int e= id/(lx1*ly1*lz1);
res2[id] = fabs(res2[id]);
res2[id] = res2[id]*c_sub_e; // cmult
if(maxdiff !=0){
double consta = 1/maxdiff;
res2[id] = res2[id]*consta;
}
}
}
extern "C" void resvisc_gpu_wrapper2_(int *glbblockSize1,double *d_res2, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *c_sub_e, double *maxdiff){
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code1 = hipPeekAtLastError();
// if (code1 != hipSuccess){
printf("CUDA: Start resvisc_gpu_wrapper2 cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt=%d,lelt=%d,lx1=%d,ly1=%d,lz1=%d,c_sub_e=%lf,maxdiff= %.20lf, \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],c_sub_e[0],maxdiff[0]);
#endif
// }
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
int blockSize =glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
hipLaunchKernelGGL(( resvisc_gpu_kernel2), dim3(gridSize), dim3(blockSize), 0, 0, d_res2,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], c_sub_e[0], maxdiff[0]);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code2 = hipPeekAtLastError();
// if (code2 != hipSuccess){
printf("CUDA: End resvisc_gpu_wrapper2 cuda status: %s\n",hipGetErrorString(code2));
#endif
// }
}
__global__ void evnsmooth_gpu_kernel(double *res2, double *t, int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy, int kstart, int kend, int jstart, int jend, int istart, int iend,int ldim , double rldim, double *rtmp, int if3d ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int e= id/(lx1*ly1*lz1);
if(t[2*ltot+id] <= res2[id]){
res2[id] = t[2*ltot+id];// wavevisc and resvisc are really res2 and t. but the dimensions are different. As I understand this will start from 0 and works well. Need to check with Dr.Tania . adeesha
}
//global syncthread is needed here. check with Dr.Tania. adeesha.
rtmp[id] = res2[id];
int ix= id % lx1;
int iy= (id/lx1)%ly1;
int iz = (id / (lx1*ly1))%lz1;
if((kstart<=iz && iz<=kend)&& (jstart<= iy && iy<= jend) && (istart<=ix && ix<=iend)){
int izm,izp;
if(if3d){
int km1=iz-1;
int kp1=iz+1;
izm=km1; // set the outer izm used by the stencil below
if (km1 < 0){ izm=kp1;} // Guermond symmetry
izp=kp1;
if (kp1 > (lz1-1)){ izp=km1;} // Guermond symmetry
}
else{
izm=iz;
izp=iz;
}
int jm1=iy-1;
int jp1=iy+1;
int iym=jm1;
if (jm1 < 0){ iym=jp1;}// Guermond symmetry
int iyp=jp1;
if (jp1 > (ly1-1)){ iyp=jm1;} // Guermond symmetry
int im1=ix-1;
int ip1=ix+1;
int ixm=im1;
if (im1 < 0){ ixm=ip1;} // Guermond symmetry
int ixp=ip1;
if (ip1 > (lx1-1)) {ixp=im1 ;} // Guermond symmetry
double x0 = res2[e*lxyz+iz*lxy+iy*lx1+ix];
double x1 = res2[e*lxyz+iz*lxy+iy*lx1+ixm];
double x2 = res2[e*lxyz+iz*lxy+iy*lx1+ixp];
double x3 = res2[e*lxyz+iz*lxy+iym*lx1+ix];
double x4 = res2[e*lxyz+iz*lxy+iyp*lx1+ix];
double x5,x6;
if (if3d){
x5 = res2[e*lxyz+izm*lxy+iy*lx1+ixp];
x6 = res2[e*lxyz+izp*lxy+iy*lx1+ixp];
}
else {
x5=0.0;
x6=0.0;
}
rtmp[id]=0.25*(2.0*ldim*x0+x1+x2+x3+x4+x5+x6)*rldim;// check whether this is same as rtmp [id]. adeesha
}
res2[id]=rtmp[id];
}
}
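//Worked example (added for clarity): assuming rldim = 1.0/ldim, as the name suggests, then with
//ldim=3 the smoothing line above gives
// rtmp = 0.25*(6*x0 + x1+...+x6)/3 = 0.5*x0 + (x1+...+x6)/12,
//so the weights sum to 0.5 + 6/12 = 1 and the stencil averages the centre value with its six
//face neighbours.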
extern "C" void evnsmooth_gpu_wrapper_(int *glbblockSize1,double *d_res2, double *d_t, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1,int *kstart, int *kend, int *jstart, int *jend, int *istart, int *iend, int *ldim , double *rldim, int *if3d ){
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code1 = hipPeekAtLastError();
// if (code1 != hipSuccess){
printf("CUDA: Start evnsmooth_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt =%d ,lelt=%d,lx1=%d,ly1=%d,lz1=%d,kstart=%d,kend=%d,jstart=%d,jend=%d,istart=%d,iend=%d,ldim=%d ,rldim=%lf,if3d=%d,\n", nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],kstart[0],kend[0],jstart[0],jend[0],istart[0],iend[0],ldim[0] ,rldim[0],if3d[0]);
#endif
// }
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
double *d_rtmp;
hipMalloc((void**)&d_rtmp,nelt[0]*lxyz* sizeof(double));
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
hipLaunchKernelGGL(( evnsmooth_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_res2,d_t,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0],(kstart[0]-1),(kend[0]-1),(jstart[0]-1),(jend[0]-1),(istart[0]-1),(iend[0]-1),ldim[0] ,rldim[0], d_rtmp, if3d[0] );
hipFree(d_rtmp);
#ifdef DEBUGPRINT
hipDeviceSynchronize();
hipError_t code2 = hipPeekAtLastError();
// if (code2 != hipSuccess){
printf("CUDA: End evnsmooth_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2));
#endif
// }
}
|
03a04df5f71e1ae979c3a9a4d1c5d8d5006e6a23.cu
|
#include <stdio.h>
#define DEBUGPRINT 0
__global__ void compute_entropy_gpu_kernel(double *tlag, double *pr, double *vtrans,int ntot, int irho, double ntol , double rgam, double gmaref,int ltot ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
double rho= fmax(vtrans[ltot*(irho-1)+id],ntol);
tlag[id]=rgam*rho*log(pr[id]/(pow(rho,gmaref) ));
}
}
extern "C" void compute_entropy_gpu_wrapper_(int *glbblockSize1,double *d_tlag, double *d_pr, double *d_vtrans,int *ntot, int *irho, double *ntol , double *rgam, double *gmaref, int *ltot){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
// if (code1 != cudaSuccess){
printf("CUDA: Start compute_entropy_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values ntot = %d, irho = %d, ntol = %lf, rgam = %lf, gmaref = %lf \n",ntot[0],irho[0],ntol[0],rgam[0],gmaref[0] );
#endif
//}
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot[0]/blockSize);
compute_entropy_gpu_kernel<<<gridSize, blockSize>>>(d_tlag,d_pr,d_vtrans,ntot[0],irho[0],ntol[0],rgam[0],gmaref[0],ltot[0]);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
//if (code2 != cudaSuccess){
printf("CUDA: End compute_engropy_wrapper cuda status: %s\n",cudaGetErrorString(code1));
#endif
//}
}
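//Illustrative usage sketch (for reference only): how the wrapper above might be driven from host
//code once the device arrays exist. Every name below (nel, lel, the d_* pointers, the chosen block
//size and gas constants) is a placeholder assumption, so the call is left inside a comment.
/*
    int lxyz = lx1*ly1*lz1;
    int ntot = nel*lxyz, ltot = lel*lxyz, irho = 1, glbblockSize1 = 128;
    double ntol = 1e-14, rgam = 1.0, gmaref = 1.4;
    compute_entropy_gpu_wrapper_(&glbblockSize1, d_tlag, d_pr, d_vtrans,
                                 &ntot, &irho, &ntol, &rgam, &gmaref, &ltot);
*/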
__global__ void entropy_residual_flux_gpu_kernel(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzd, double *vx, double *vy, double *vz, int if3d ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
if(stage==1){
res2[id]=tlag[id]-tlag[ltot*lorder+id] ;
}
else{
res2[id]=tlag[id]-tlag[ltot+id] ;
}
res2[id] = res2[id]*rdt;
// evaluate_entropy_flux(e)
totalh[id]= vx[id]*tlag[id];
totalh[lxyzd+id] = vy[id]*tlag[id];
if(if3d){totalh[lxyzd*2+id] = vz[id]*tlag[id];}
//flux_div_mini(e)
}
}
__global__ void flux_div_mini_gpu_kernel(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzd, double *ur1,double *us1, double *ut1, double *ur2, double *us2, double *ut2, double *ur3, double *us3, double *ut3,double *ud, int ldd, double *jacmi, double *rxm1, double *sxm1, double *txm1, double *rym1, double *sym1, double *tym1,double *rzm1, double *szm1, double *tzm1, int if3d ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
int i= id % ldd;
if(id<ntot){
//NOTE: ur/us/ut are indexed only by the per-element offset [i]; since *lelt was added to their allocation later, the index should probably be [id]. Check again. adeesha
if(if3d){
ud[id] = jacmi[id] *( rxm1[id]*ur1[i]+ sxm1[id]*us1[i]+txm1[id]*ut1[i]);
ud[id] = ud[id]+ jacmi[id] *( rym1[id]*ur2[i]+ sym1[id]*us2[i]+txm1[id]*ut2[i]);
ud[id] = ud[id] + jacmi[id] *( rzm1[id]*ur3[i]+ szm1[id]*us3[i]+tzm1[id]*ut3[i]);
}
else{
ud[id] = jacmi[id] *( rxm1[id]*ur1[i]+ sxm1[id]*us1[i]);
ud[id] = ud[id]+ jacmi[id] *( rym1[id]*ur2[i]+ sym1[id]*us2[i]);
}
//add 2
res2[id] = res2[id] + ud[id];
}
}
//mxm multiplication
__global__ void mxm1(double *a, int n1, double *b, int n2, double *c, int n3, int nelt, int aSize, int bSize, int cSize, int extraEq){
//calculate c(n1,n3) = a(n1,n2) X b(n2,n3) in c
//in fortran the original calculation was
// c(n3,n1) = b(n3,n2) X a(n2,n1)
// a,b,cSize are single element size
//extraEq, in case of a matrix has equation as an index
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<nelt*n1*n3){
int e = id/(n1*n3);
int rc = id%(n1*n3);
int i = rc/n3;
int j = rc%n3;
int cid = e*cSize + rc;
int aid = e*aSize + extraEq + i*n2;
int bid = e*bSize + j;
c[cid] = 0;
for(int k = 0; k<n2; k++)
c[cid]+=a[aid+k]*b[bid+k*n3];
}
}
extern "C" void entropy_residual_gpu_wrapper_(int *glbblockSize1,double *d_tlag, double *d_res2,int *ntot, double *rdt, int *stage, int *lorder,int *ltot, int *lxd, int *lyd, int *lzd, double *d_vx, double *d_vy, double *d_vz, int *lx1, int *ly1, int *lz1, double *d_jacmi, double *d_rxm1, double *d_sxm1, double *d_txm1, double *d_rym1, double *d_sym1, double *d_tym1,double *d_rzm1, double *d_szm1, double *d_tzm1,int *if3d,int *nelt, double *d_dxm1, double *d_dxtm1 ){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
// if (code1 != cudaSuccess){
printf("CUDA: Start entropy_residual_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start entropy_residual_gpu_wrapper values rdt = %lf, stage = %d, lorder= %d,ltot = %d,lxd = %d, lyd = %d, lzd = %d, lx1 = %d,ly1 = %d,lz1 = %d,if3d = %d,nelt = %d \n",rdt[0], stage[0],lorder[0],ltot[0],lxd[0], lyd[0],lzd[0],lx1[0],ly1[0],lz1[0],if3d[0],nelt[0]);
#endif
//}
double *d_totalh_temp; // Anyway d_totalh seems not needed. check with Dr.Tania. adeesha
double *d_ur1;
double *d_us1;
double *d_ut1;
double *d_ur2;
double *d_us2;
double *d_ut2;
double *d_ur3;
double *d_us3;
double *d_ut3;
double *d_ud;
int lxyzd = lxd[0]*lyd[0]*lzd[0];
int ldd = lx1[0]*ly1[0]*lz1[0];
cudaMalloc((void**)&d_totalh_temp,3*lxyzd *nelt[0]* sizeof(double));
cudaMalloc((void**)&d_ur1,ldd * nelt[0]*sizeof(double)); //nelt[0] added later. need to double check.
cudaMalloc((void**)&d_us1,ldd * nelt[0]*sizeof(double));
cudaMalloc((void**)&d_ut1,ldd * nelt[0]*sizeof(double));
cudaMalloc((void**)&d_ur2,ldd * nelt[0]*sizeof(double));
cudaMalloc((void**)&d_us2,ldd * nelt[0]*sizeof(double));
cudaMalloc((void**)&d_ut2,ldd * nelt[0]*sizeof(double));
cudaMalloc((void**)&d_ur3,ldd * nelt[0]*sizeof(double));
cudaMalloc((void**)&d_us3,ldd * nelt[0]*sizeof(double));
cudaMalloc((void**)&d_ut3,ldd * nelt[0]*sizeof(double));
cudaMalloc((void**)&d_ud,nelt[0]*ldd * sizeof(double));
cudaMemset(d_totalh_temp, 0.0, 3*lxyzd*nelt[0]*sizeof(double));
cudaMemset(d_ur1, 0.0, ldd*nelt[0]*sizeof(double));
cudaMemset(d_us1, 0.0, ldd*nelt[0]*sizeof(double));
cudaMemset(d_ut1, 0.0, ldd*nelt[0]*sizeof(double));
cudaMemset(d_ur2, 0.0, ldd*nelt[0]*sizeof(double));
cudaMemset(d_us2, 0.0, ldd*nelt[0]*sizeof(double));
cudaMemset(d_ut2, 0.0, ldd*nelt[0]*sizeof(double));
cudaMemset(d_ur3, 0.0, ldd*nelt[0]*sizeof(double));
cudaMemset(d_us3, 0.0, ldd*nelt[0]*sizeof(double));
cudaMemset(d_ut3, 0.0, ldd*nelt[0]*sizeof(double));
cudaMemset(d_ud, 0.0, ldd*nelt[0]*sizeof(double));
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot[0]/blockSize);
entropy_residual_flux_gpu_kernel<<<gridSize, blockSize>>>(d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh_temp, lxyzd, d_vx, d_vy, d_vz, if3d[0]);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
code1 = cudaPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after kernel 1cuda status: %s\n",cudaGetErrorString(code1));
#endif
int mdm1 = lx1[0]-1;
// Following is the local_grad3 function
int nx_2 = lx1[0]*lx1[0];
int nx_3 = nx_2*lx1[0];
int nxd_2 = nx_2;
int nxd_3 = nx_3;
//ur(nx,nx*nx) = D(nx,nx) * u(nx,nx*nx) fortran
//ur(nx*nx,nx) = u(nx*nx,nx) * D(nx,nx) C
if(if3d[0]){
blockSize=glbblockSize1[0], gridSize;
// for ur1 us1 and ut1
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
mxm1<<<gridSize, blockSize>>>(d_totalh_temp,nx_2, d_dxm1, lx1[0], d_ur1, lx1[0], nelt[0], nx_3, 0, nxd_3, 0);//ur,us, ut should be indexed by nxd
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
code1 = cudaPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after 1st mxm 1cuda status: %s\n",cudaGetErrorString(code1));
#endif
for(int k = 0; k<lx1[0]; k++){
//usk(nx,nx) = uk(nx,nx) * dt(nx,nx) fortran
//usk(nx,nx) = dt(nx,nx) * uk(nx,nx) C
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
mxm1<<<gridSize, blockSize>>>(d_dxtm1,lx1[0], d_totalh_temp+k*nx_2, lx1[0],d_us1+k*nx_2, lx1[0], nelt[0], 0, nx_3, nxd_3, 0);
}
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
code1 = cudaPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after for loop mxm 1cuda status: %s\n",cudaGetErrorString(code1));
#endif
//ut(nx_2,nx) = u(nx_2,nx) * dt(nx,nx) fortran
//ut(nx,nx_2) = dt(nx,nx) * u(nx,nx_2) C
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
mxm1<<<gridSize, blockSize>>>(d_dxtm1,lx1[0], d_totalh_temp, lx1[0], d_ut1, nx_2, nelt[0], 0, nx_3, nxd_3, 0);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
code1 = cudaPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after 3rd mxm 1cuda status: %s\n",cudaGetErrorString(code1));
#endif
// for ur2 us2 and ut2
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
mxm1<<<gridSize, blockSize>>>(d_totalh_temp+lxyzd*nelt[0],nx_2, d_dxm1, lx1[0], d_ur2, lx1[0], nelt[0], nx_3, 0, nxd_3, 0);//ur,us, ut should be indexed by nxd
for(int k = 0; k<lx1[0]; k++){
//usk(nx,nx) = uk(nx,nx) * dt(nx,nx) fortran
//usk(nx,nx) = dt(nx,nx) * uk(nx,nx) C
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
mxm1<<<gridSize, blockSize>>>(d_dxtm1,lx1[0], d_totalh_temp+lxyzd*nelt[0]+k*nx_2, lx1[0],d_us2+k*nx_2, lx1[0], nelt[0], 0, nx_3, nxd_3, 0);
}
//ut(nx_2,nx) = u(nx_2,nx) * dt(nx,nx) fortran
//ut(nx,nx_2) = dt(nx,nx) * u(nx,nx_2) C
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
mxm1<<<gridSize, blockSize>>>(d_dxtm1,lx1[0], d_totalh_temp+lxyzd*nelt[0], lx1[0], d_ut2, nx_2, nelt[0], 0, nx_3, nxd_3, 0);
// for ur3 us3 and ut3
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
mxm1<<<gridSize, blockSize>>>(d_totalh_temp+2*lxyzd*nelt[0],nx_2, d_dxm1, lx1[0], d_ur3, lx1[0], nelt[0], nx_3, 0, nxd_3, 0);//ur,us, ut should be indexed by nxd
for(int k = 0; k<lx1[0]; k++){
//usk(nx,nx) = uk(nx,nx) * dt(nx,nx) fortran
//usk(nx,nx) = dt(nx,nx) * uk(nx,nx) C
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
mxm1<<<gridSize, blockSize>>>(d_dxtm1,lx1[0], d_totalh_temp+2*lxyzd*nelt[0]+k*nx_2, lx1[0],d_us3+k*nx_2, lx1[0], nelt[0], 0, nx_3, nxd_3, 0);
}
//ut(nx_2,nx) = u(nx_2,nx) * dt(nx,nx) fortran
//ut(nx,nx_2) = dt(nx,nx) * u(nx,nx_2) C
gridSize = (int)ceil((float)nelt[0]*nx_3/blockSize);
mxm1<<<gridSize, blockSize>>>(d_dxtm1,lx1[0], d_totalh_temp+2*lxyzd*nelt[0], lx1[0], d_ut3, nx_2, nelt[0], 0, nx_3, nxd_3, 0);
}else{
// for ur1 us1 and ut1
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
mxm1<<<gridSize, blockSize>>>(d_totalh_temp,lx1[0], d_dxm1, lx1[0], d_ur1, lx1[0], nelt[0], nx_2, 0, nxd_2, 0);//ur,us, ut should be indexed by nxd
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
mxm1<<<gridSize, blockSize>>>(d_dxtm1,lx1[0], d_totalh_temp, lx1[0],d_us1, lx1[0], nelt[0], 0, nx_2, nxd_2, 0);
// for ur2 us2 and ut1
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
mxm1<<<gridSize, blockSize>>>(d_totalh_temp+lxyzd*nelt[0],lx1[0], d_dxm1, lx1[0], d_ur2, lx1[0], nelt[0], nx_2, 0, nxd_2, 0);//ur,us, ut should be indexed by nxd
gridSize = (int)ceil((float)nelt[0]*nx_2/blockSize);
mxm1<<<gridSize, blockSize>>>(d_dxtm1,lx1[0], d_totalh_temp+lxyzd*nelt[0], lx1[0],d_us2, lx1[0], nelt[0], 0, nx_2, nxd_2, 0);
}
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
code1 = cudaPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper before flux_div_mini_gpu_kernel cuda status: %s\n",cudaGetErrorString(code1));
#endif
flux_div_mini_gpu_kernel<<<gridSize, blockSize>>>(d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh_temp, lxyzd, d_ur1,d_us1, d_ut1, d_ur2,d_us2, d_ut2, d_ur3, d_us3, d_ut3,d_ud, ldd, d_jacmi, d_rxm1, d_sxm1, d_txm1, d_rym1, d_sym1, d_tym1, d_rzm1, d_szm1, d_tzm1,if3d[0]);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
code1 = cudaPeekAtLastError();
printf("CUDA: entropy_residual_gpu_wrapper after flux_div_mini_gpu_kernel cuda status: %s\n",cudaGetErrorString(code1));
#endif
cudaFree(d_totalh_temp);
cudaFree(d_ur1);
cudaFree(d_ur2);
cudaFree(d_ur3);
cudaFree(d_us1);
cudaFree(d_us2);
cudaFree(d_us3);
cudaFree(d_ut1);
cudaFree(d_ut2);
cudaFree(d_ut3);
cudaFree(d_ud);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
// if (code2 != cudaSuccess){
printf("CUDA: End entropy residual_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
#endif
//}
}
__global__ void wavevisc_gpu_kernel(double *t,double *csound, double *vx, double *vy, double *vz, int ntot, double *wavespeed,int lxyz, int lx1, int ly1, int lz1, double *vtranstmp, double c_max,int ltot, double *meshh ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
wavespeed[id]= csound [id] +sqrtf(vx[id]*vx[id]+vy[id]*vy[id]+vz[id]*vz[id] ) ;
// find max of wavespeed using reduction
__syncthreads();
unsigned int i = lxyz/2;
int e= id/(lx1*ly1*lz1);
int startofcurrentelement = id-e;
while(i != 0){
if(id-startofcurrentelement < i){
wavespeed[id] = fmaxf(wavespeed[id], wavespeed[id + i]);
}
__syncthreads();
i /= 2;
}
double maxeig = wavespeed[id-e];
// find max of vtrans using reduction. But never used? check with Dr.Tania
//i = lxyz/2;
//int e= id/(lx1*ly1*lz1);
//int startofcurrentelement = id-e;
//while(i != 0){
// if(id-startofcurrentelement < i){
// vtranstmp[id] = fmaxf(vtranstmp[id], vtranstmp[id + i]);
// }
// __syncthreads();
// i /= 2;
//}
//int rhomax = vtranstmp[id-e];
t[2*ltot+id] = c_max*maxeig*meshh[e];
// if(id<10){
// printf("$$$ print from cuda maxeig = %lf t[2*ltot+id]= %lf meshh[e]=%lf \n",maxeig,t[2*ltot+id],meshh[e]);
// }
}
}
extern "C" void wavevisc_gpu_wrapper_(int *glbblockSize1,double *d_t, double *d_csound,double *d_vx, double *d_vy, double *d_vz, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_vtrans, double *c_max, double *d_meshh, int *irho ){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
// if (code1 != cudaSuccess){
printf("CUDA: Start wavevisc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt= %d,lelt= %d,lx1= %d,ly1= %d, lz1= %d,c_max= %lf,irho= %d \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],c_max[0],irho[0]);
#endif
// }
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
double *d_wavespeed;
cudaMalloc((void**)&d_wavespeed,nelt[0]*lxyz* sizeof(double));
// cudaMemset(d_totalh_temp, 0.0, 3*lxyzd*nelt*sizeof(double));
double *d_vtranstemp;
cudaMalloc((void**)&d_vtranstemp,nelt[0]*lxyz* sizeof(double));
cudaMemcpy(d_vtranstemp, &d_vtrans[(irho[0]-1)*lelt[0]*lxyz], nelt[0]*lxyz* sizeof(double), cudaMemcpyDeviceToDevice);
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
wavevisc_gpu_kernel<<<gridSize, blockSize>>>(d_t,d_csound, d_vx, d_vy, d_vz,ntot,d_wavespeed, lxyz,lx1[0],ly1[0],lz1[0],d_vtranstemp,c_max[0], ltot, d_meshh);
cudaFree(d_wavespeed);
cudaFree(d_vtranstemp);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
// if (code2 != cudaSuccess){
printf("CUDA: End Wavevisc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
#endif
// }
}
__global__ void max_to_trilin_gpu_kernel(double *t,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy, double *xm1, double *ym1, double *zm1, int if3d ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int e= id/(lxyz);
double p000 = t[2*ltot+e*lxyz];
double p100 = t[2*ltot+e*lxyz+(lx1-1)];
double p010 = t[2*ltot+e*lxyz+(ly1-1)*lx1];
double p110 = t[2*ltot+e*lxyz+(ly1-1)*lx1+(lx1-1)];
double p001 = t[2*ltot+e*lxyz+(lz1-1)*lxy];
double p101 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(lx1-1)];
double p011 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1];
double p111 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+(lx1-1)];
double c1=p100-p000;
double c2=p010-p000;
double c3=p001-p000;
double c4=p110-p010-p100+p000;
double c5=p011-p001-p010+p000;
double c6=p101-p001-p100+p000;
double c7=p111-p011-p101-p110+p100+p001+p010-p000;
double rdx=1.0/(xm1[e*lxyz+(lx1-1)]-xm1[e*lxyz]); // cubes only!!!
double rdy=1.0/(ym1[e*lxyz+(ly1-1)*lx1]-ym1[e*lxyz]);
double rdz=0.0;
if(if3d){ rdz=1.0/(zm1[e*lxyz+(lz1-1)*lxy]-zm1[e*lxyz]); }
int firstlx = id%lxyz;
double deltax=rdx*(xm1[id]-xm1[e*lxyz]) ;//! cubes only!!!
double deltay=rdy*(ym1[id]-ym1[e*lxyz]);
double deltaz=0.0;
if (if3d){ deltaz=rdz*(zm1[id]-zm1[e*lxyz]);}
t[2*ltot+id] =p000+c1*deltax+c2*deltay+c3*deltaz+ c4*deltax*deltay+c5*deltay*deltaz+ c6*deltaz*deltax+c7*deltay*deltaz*deltax;
}
}
extern "C" void max_to_trilin_gpu_wrapper_(int *glbblockSize1,double *d_t, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_xm1, double *d_ym1, double *d_zm1, int *if3d ){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
// if (code1 != cudaSuccess){
printf("CUDA: Start max_to_trilin_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt=%d,lelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],if3d[0]);
#endif
//}
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
max_to_trilin_gpu_kernel<<<gridSize, blockSize>>>(d_t,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], d_xm1, d_ym1, d_zm1, if3d[0]);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
//if (code2 != cudaSuccess){
printf("CUDA: End max_to_trilin_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
#endif
//}
}
__global__ void resvisc_gpu_kernel1(double *res2,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy,double *meshh ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int e= id/(lx1*ly1*lz1);
res2[id] = res2[id]*meshh[e]*meshh[e];
}
}
extern "C" void resvisc_gpu_wrapper1_(int *glbblockSize1,double *d_res2, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_meshh){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
//if (code1 != cudaSuccess){
printf("CUDA: Start resvisc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt= %d,lelt= %d,lx1= %d,ly1= %d,lz1 = %d,\n", nelt[0],lelt[0],lx1[0],ly1[0],lz1[0]);
#endif
//}
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
resvisc_gpu_kernel1<<<gridSize, blockSize>>>(d_res2,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], d_meshh);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
//if (code2 != cudaSuccess){
printf("CUDA: End resvisc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
#endif
//}
}
__global__ void resvisc_gpu_kernel2(double *res2,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy,double c_sub_e, double maxdiff ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int e= id/(lx1*ly1*lz1);
res2[id] = fabs(res2[id]);
res2[id] = res2[id]*c_sub_e; // cmult
if(maxdiff !=0){
double consta = 1/maxdiff;
res2[id] = res2[id]*consta;
}
}
}
extern "C" void resvisc_gpu_wrapper2_(int *glbblockSize1,double *d_res2, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *c_sub_e, double *maxdiff){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
// if (code1 != cudaSuccess){
printf("CUDA: Start resvisc_gpu_wrapper2 cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt=%d,lelt=%d,lx1=%d,ly1=%d,lz1=%d,c_sub_e=%lf,maxdiff= %.20lf, \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],c_sub_e[0],maxdiff[0]);
#endif
// }
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
int blockSize =glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
resvisc_gpu_kernel2<<<gridSize, blockSize>>>(d_res2,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], c_sub_e[0], maxdiff[0]);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
// if (code2 != cudaSuccess){
printf("CUDA: End resvisc_gpu_wrapper2 cuda status: %s\n",cudaGetErrorString(code2));
#endif
// }
}
__global__ void evnsmooth_gpu_kernel(double *res2, double *t, int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy, int kstart, int kend, int jstart, int jend, int istart, int iend,int ldim , double rldim, double *rtmp, int if3d ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int e= id/(lx1*ly1*lz1);
if(t[2*ltot+id] <= res2[id]){
res2[id] = t[2*ltot+id];// wavevisc and resvisc are really res2 and t. but the dimensions are different. As I understand this will start from 0 and works well. Need to check with Dr.Tania . adeesha
}
//global syncthread is needed here. check with Dr.Tania. adeesha.
rtmp[id] = res2[id];
int ix= id % lx1;
int iy= (id/lx1)%ly1;
int iz = (id / (lx1*ly1))%lz1;
if((kstart<=iz && iz<=kend)&& (jstart<= iy && iy<= jend) && (istart<=ix && ix<=iend)){
int izm,izp;
if(if3d){
int km1=iz-1;
int kp1=iz+1;
izm=km1; // set the outer izm used by the stencil below
if (km1 < 0){ izm=kp1;} // Guermond symmetry
izp=kp1;
if (kp1 > (lz1-1)){ izp=km1;} // Guermond symmetry
}
else{
izm=iz;
izp=iz;
}
int jm1=iy-1;
int jp1=iy+1;
int iym=jm1;
if (jm1 < 0){ iym=jp1;}// Guermond symmetry
int iyp=jp1;
if (jp1 > (ly1-1)){ iyp=jm1;} // Guermond symmetry
int im1=ix-1;
int ip1=ix+1;
int ixm=im1;
if (im1 < 0){ ixm=ip1;} // Guermond symmetry
int ixp=ip1;
if (ip1 > (lx1-1)) {ixp=im1 ;} // Guermond symmetry
double x0 = res2[e*lxyz+iz*lxy+iy*lx1+ix];
double x1 = res2[e*lxyz+iz*lxy+iy*lx1+ixm];
double x2 = res2[e*lxyz+iz*lxy+iy*lx1+ixp];
double x3 = res2[e*lxyz+iz*lxy+iym*lx1+ix];
double x4 = res2[e*lxyz+iz*lxy+iyp*lx1+ix];
double x5,x6;
if (if3d){
x5 = res2[e*lxyz+izm*lxy+iy*lx1+ixp];
x6 = res2[e*lxyz+izp*lxy+iy*lx1+ixp];
}
else {
x5=0.0;
x6=0.0;
}
rtmp[id]=0.25*(2.0*ldim*x0+x1+x2+x3+x4+x5+x6)*rldim;// check whether this is same as rtmp [id]. adeesha
}
res2[id]=rtmp[id];
}
}
extern "C" void evnsmooth_gpu_wrapper_(int *glbblockSize1,double *d_res2, double *d_t, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1,int *kstart, int *kend, int *jstart, int *jend, int *istart, int *iend, int *ldim , double *rldim, int *if3d ){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
// if (code1 != cudaSuccess){
printf("CUDA: Start evnsmooth_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start compute_entropy_gpu_wrapper values nelt =%d ,lelt=%d,lx1=%d,ly1=%d,lz1=%d,kstart=%d,kend=%d,jstart=%d,jend=%d,istart=%d,iend=%d,ldim=%d ,rldim=%lf,if3d=%d,\n", nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],kstart[0],kend[0],jstart[0],jend[0],istart[0],iend[0],ldim[0] ,rldim[0],if3d[0]);
#endif
// }
int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0];
int lxyz = lx1[0]*ly1[0]*lz1[0];
int ltot = lelt[0]*lxyz;
double *d_rtmp;
cudaMalloc((void**)&d_rtmp,nelt[0]*lxyz* sizeof(double));
int blockSize = glbblockSize1[0], gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
evnsmooth_gpu_kernel<<<gridSize, blockSize>>>(d_res2,d_t,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0],(kstart[0]-1),(kend[0]-1),(jstart[0]-1),(jend[0]-1),(istart[0]-1),(iend[0]-1),ldim[0] ,rldim[0], d_rtmp, if3d[0] );
cudaFree(d_rtmp);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
// if (code2 != cudaSuccess){
printf("CUDA: End evnsmooth_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
#endif
// }
}
|
be5cfe39eddc510996866884ea19b6f317ca7fd9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
<DUALSPHYSICS> Copyright (C) 2012 by Jose M. Dominguez, Dr. Alejandro Crespo, Prof. M. Gomez Gesteira, Anxo Barreiro, Dr. Benedict Rogers.
EPHYSLAB Environmental Physics Laboratory, Universidade de Vigo, Ourense, Spain.
School of Mechanical, Aerospace and Civil Engineering, University of Manchester, Manchester, U.K.
This file is part of DualSPHysics.
DualSPHysics is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
DualSPHysics is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License, along with DualSPHysics. If not, see <http://www.gnu.org/licenses/>.
*/
/// \file CudaSphFC.cu \brief Implements all the functions to compute forces on GPU.
//##############################################################################
//# Kernels for force computation (including Floating objects).
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,bool xsph> __device__ void KerComputeForcesFluidBox
(unsigned p1,const unsigned &pini,const unsigned &pfin,float4 *pospres,float4 *velrhop,unsigned *idp,float massp2,float massftp1,float3 posp1,float3 velp1,float3 devsp1,float rhopp1,float3 &acep1,float3 &vcor,float &arp1,float &visc)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 pospres2=pospres[p2];
float drx=posp1.x-pospres2.x;
float dry=posp1.y-pospres2.y;
float drz=posp1.z-pospres2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
const float4 velrhop2=velrhop[p2];
const float prrhop2=pospres2.w/(velrhop2.w*velrhop2.w);
float prs=devsp1.x+prrhop2;
float wab,frx,fry,frz;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
float fac;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const bool radgt=qq>1;
const float wqq1=(radgt? 2.0f-qq: qq);
const float wqq2=wqq1*wqq1;
const float wqq3=wqq2*wqq1;
wab=(radgt? CTE.cubic_a24*wqq3: CTE.cubic_a2*(1.0f-1.5f*wqq2+0.75f*wqq3));
fac=(radgt? CTE.cubic_c2*wqq2: (CTE.cubic_c1*qq+CTE.cubic_d1*wqq2))/rad;
//-Tensile correction.
float fab=wab*CTE.cubic_odwdeltap;
fab*=fab; fab*=fab; //fab=fab^4
prs+=fab*(devsp1.y+ prrhop2*(pospres2.w>0? 0.01f: -0.2f) );
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
const float wqq=2.f*qq+1.f;
const float wqq1=1.f-0.5f*qq;
const float wqq2=wqq1*wqq1;
wab=CTE.wendland_awen*wqq*wqq2*wqq2;
fac=CTE.wendland_bwen*qq*wqq2*wqq1/rad;
}
frx=fac*drx; fry=fac*dry; frz=fac*drz;
}
if(floating)massp2=(p2<CTE.nbound||idp[p2]<CTE.nbound? CTE.massb: CTE.massf);
{//===== Acceleration =====
const float p_vpm=-prs*(floating? massp2*massftp1: massp2);
acep1.x+=p_vpm*frx; acep1.y+=p_vpm*fry; acep1.z+=p_vpm*frz;
}
float dvx=velp1.x-velrhop2.x, dvy=velp1.y-velrhop2.y, dvz=velp1.z-velrhop2.z;
float robar=(rhopp1+velrhop2.w)*0.5f;
{//===== Viscosity =====
const float dot=drx*dvx + dry*dvy + drz*dvz;
const float dot_rr2=dot/(rr2+CTE.eta2);
{//-Artificial viscosity
if(dot<0){
const float csound=velrhop2.w*OVERRHOPZERO; //const float csound=CTE.cs0*powf(rrhop*OVERRHOPZERO,3);
const float cbar=(devsp1.z+ CTE.cs0*(csound*csound*csound) )*0.5f;
const float amubar=CTE.h*dot_rr2; //float amubar=CTE.h*dot/(rr2+CTE.eta2);
const float pi_visc=(-CTE.visco*cbar*amubar/robar)*massp2;
acep1.x-=pi_visc*frx; acep1.y-=pi_visc*fry; acep1.z-=pi_visc*frz;
}
}
visc=max(dot_rr2,visc); //reduction of viscdt will be performed on GPU.
}
//-Density derivative.
arp1+=massp2*(dvx*frx+dvy*fry+dvz*frz);
//-XSPH correction.
if(xsph){
const float wab_rhobar=massp2*(wab/robar);
vcor.x-=wab_rhobar * dvx;
vcor.y-=wab_rhobar * dvy;
vcor.z-=wab_rhobar * dvz;
}
}
}
}
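//------------------------------------------------------------------------------
// Reference note (added for clarity): with q=rad/h, the Wendland branch above evaluates
// W = wendland_awen*(2*q+1)*(1-q/2)^4 and fac = wendland_bwen*q*(1-q/2)^3/rad, and
// (frx,fry,frz)=fac*(drx,dry,drz) is then used as the kernel gradient; the CTE constants
// are assumed to already carry the dimensional normalisation.
//------------------------------------------------------------------------------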
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles (Bound-Fluid).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating> __device__ void KerComputeForcesBoundBox
(unsigned p1,const unsigned &pini,const unsigned &pfin,float4 *pospres,float4 *velrhop,unsigned* idp,float massf,float3 posp1,float3 velp1,float &arp1,float &visc)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 pospres2=pospres[p2];
float drx=posp1.x-pospres2.x;
float dry=posp1.y-pospres2.y;
float drz=posp1.z-pospres2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
const float4 velrhop2=velrhop[p2];
float frx,fry,frz;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
float fac;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const bool radgt=qq>1;
float wqq2=(radgt? 2.0f-qq: qq); wqq2*=wqq2;
fac=(radgt? CTE.cubic_c2*wqq2: (CTE.cubic_c1*qq+CTE.cubic_d1*wqq2))/rad;
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
const float wqq1=1.f-0.5f*qq;
fac=CTE.wendland_bwen*qq*wqq1*wqq1*wqq1/rad;
}
frx=fac*drx; fry=fac*dry; frz=fac*drz;
}
float dvx=velp1.x-velrhop2.x, dvy=velp1.y-velrhop2.y, dvz=velp1.z-velrhop2.z;
{//===== Viscosity =====
const float dot=drx*dvx + dry*dvy + drz*dvz;
const float dot_rr2=dot/(rr2+CTE.eta2);
visc=max(dot_rr2,visc); // reduction of viscdt will be performed on GPU.
}
//-Density derivative.
if(floating)arp1+=(idp[p2]<CTE.nbound? CTE.massb: CTE.massf)*(dvx*frx+dvy*fry+dvz*frz);
else arp1+=massf*(dvx*frx+dvy*fry+dvz*frz);
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles using NeigsCell (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,bool xsph,unsigned ncellnv> __global__ void KerComputeForcesFluidNeigs
(unsigned pini,unsigned n,unsigned ncells,unsigned *cell,uint2 *cellnvb,uint2 *cellnvf,float4 *pospres,float4 *velrhop,unsigned *idp,float3 *ace,float *ar,float3 *velxcor,float *viscdt)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float arp1=0,visc=0;
float3 acep1=make_float3(0,0,0);
float3 vcor;
if(xsph)vcor=acep1;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
float rhopp1=r.w;
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const float csoun=rhopp1*OVERRHOPZERO;
float3 devsp1; devsp1.x=r.w/(rhopp1*rhopp1); devsp1.y=devsp1.x*(r.w>0? 0.01f: -0.2f); devsp1.z=CTE.cs0*(csoun*csoun*csoun);
float massftp1=(floating? (idp[p1]<CTE.nbound? CTE.massb: 1.f): 0);
const int cel=cell[p];
//-Interaction with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesFluidBox<tkernel,floating,xsph> (p1,rg.x,rg.y,pospres,velrhop,idp,(floating? 0: CTE.massf),massftp1,posp1,velp1,devsp1,rhopp1,acep1,vcor,arp1,visc);
}
//-Interaction with boundaries.
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvb[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesFluidBox<tkernel,floating,xsph> (p1,rg.x,rg.y,pospres,velrhop,idp,(floating? 0: CTE.massb),massftp1,posp1,velp1,devsp1,rhopp1,acep1,vcor,arp1,visc);
}
//-Stores results.
if(arp1||acep1.x||acep1.y||acep1.z||visc){
ar[p1]+=arp1;
float3 r=ace[p1]; r.x+=acep1.x; r.y+=acep1.y; r.z+=acep1.z; ace[p1]=r;
if(xsph){
r=velxcor[p1]; r.x+=vcor.x; r.y+=vcor.y; r.z+=vcor.z; velxcor[p1]=r;
}
if(visc>viscdt[p1])viscdt[p1]=visc;
}
}
}
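//------------------------------------------------------------------------------
// Note (added for clarity): in the loops above, rg=(!r||rg.y? cellnvf/cellnvb[...]: make_uint2(0,0))
// always fetches the first neighbour range (r==0) and afterwards keeps fetching only while the
// previously fetched range was non-empty (rg.y!=0); once an empty range is met, the remaining
// iterations simply assign an empty range and are skipped by the if(rg.y) test.
//------------------------------------------------------------------------------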
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles using NeigsCell (Bound-Fluid).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,unsigned ncellnv> __global__ void KerComputeForcesBoundNeigs
(unsigned n,unsigned ncells,unsigned *cell,uint2 *cellnvf,float4 *pospres,float4 *velrhop,unsigned *idp,float *ar,float *viscdt)
{
unsigned p1=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p1<n){
float arp1=0,visc=0;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const int cel=cell[p1];
//-Interaction of boundary with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesBoundBox<tkernel,floating>(p1,rg.x,rg.y,pospres,velrhop,idp,(floating? 0: CTE.massf),posp1,velp1,arp1,visc);
}
//-Stores results.
if(arp1||visc){
ar[p1]+=arp1;
if(visc>viscdt[p1])viscdt[p1]=visc;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,bool xsph,unsigned hdiv> __global__ void KerComputeForcesFluid
(unsigned pini,unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,unsigned *cell,int2 *cellbegb,int2 *cellbegf,float4 *pospres,float4 *velrhop,unsigned *idp,float3 *ace,float *ar,float3 *velxcor,float *viscdt)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float arp1=0,visc=0;
float3 acep1=make_float3(0,0,0);
float3 vcor;
if(xsph)vcor=acep1;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
float rhopp1=r.w;
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const float csoun=rhopp1*OVERRHOPZERO;
float3 devsp1; devsp1.x=r.w/(rhopp1*rhopp1); devsp1.y=devsp1.x*(r.w>0? 0.01f: -0.2f); devsp1.z=CTE.cs0*(csoun*csoun*csoun);
float massftp1=(floating? (idp[p1]<CTE.nbound? CTE.massb: 1.f): 0);
const int cel=cell[p];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesFluidBox<tkernel,floating,xsph> (p1,pini,pfin,pospres,velrhop,idp,(floating? 0: CTE.massf),massftp1,posp1,velp1,devsp1,rhopp1,acep1,vcor,arp1,visc);
}
}
//-Interaction with boundaries.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegb[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesFluidBox<tkernel,floating,xsph> (p1,pini,pfin,pospres,velrhop,idp,(floating? 0: CTE.massb),massftp1,posp1,velp1,devsp1,rhopp1,acep1,vcor,arp1,visc);
}
}
//-Stores results.
if(arp1||acep1.x||acep1.y||acep1.z||visc){
ar[p1]+=arp1;
float3 r=ace[p1]; r.x+=acep1.x; r.y+=acep1.y; r.z+=acep1.z; ace[p1]=r;
if(xsph){
r=velxcor[p1]; r.x+=vcor.x; r.y+=vcor.y; r.z+=vcor.z; velxcor[p1]=r;
}
if(visc>viscdt[p1])viscdt[p1]=visc;
}
}
}
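//------------------------------------------------------------------------------
// Worked example (added for clarity): the cell decode used above, cx=cel%ncx,
// cy=(cel%(ncx*ncy))/ncx, cz=cel/(ncx*ncy), inverts cel=cx+ncx*cy+(ncx*ncy)*cz. E.g. with
// ncx=4, ncy=3 and cel=17: cx=17%4=1, cy=(17%12)/4=1, cz=17/12=1, and 1+4*1+12*1=17. The
// hdiv-wide search window [cxini,cxfin) etc. is then clamped to the grid bounds with min().
//------------------------------------------------------------------------------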
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles (Bound-Fluid).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,unsigned hdiv> __global__ void KerComputeForcesBound(unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,unsigned *cell,int2 *cellbegf,float4 *pospres,float4 *velrhop,unsigned *idp,float *ar,float *viscdt)
{
unsigned p1=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p1<n){
float arp1=0,visc=0;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const int cel=cell[p1];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction of boundary with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesBoundBox<tkernel,floating>(p1,pini,pfin,pospres,velrhop,idp,(floating? 0: CTE.massf),posp1,velp1,arp1,visc);
}
}
//-Stores results.
if(arp1||visc){
ar[p1]+=arp1;
if(visc>viscdt[p1])viscdt[p1]=visc;
}
}
}
//##############################################################################
//# Kernels for force computation when using Laminar+SPS viscosity and KGC.
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,TpVisco tvisco,bool kgc,bool xsph> __device__ void KerComputeForcesFullFluidBox
(unsigned p1,const unsigned &pini,const unsigned &pfin,float4 *pospres,float4 *velrhop,unsigned *idp,const tsymatrix3f *tau,float massp2,float3 posp1,float3 velp1,float3 devsp1,float rhopp1,const tsymatrix3f &matkgcp1,const tsymatrix3f &taup1,tsymatrix3f &csphp1,float3 &acep1,float3 &vcor,float &arp1,float &visc)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 pospres2=pospres[p2];
float drx=posp1.x-pospres2.x;
float dry=posp1.y-pospres2.y;
float drz=posp1.z-pospres2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
const float4 velrhop2=velrhop[p2];
const float prrhop2=pospres2.w/(velrhop2.w*velrhop2.w);
float prs=devsp1.x+prrhop2;
float wab,frx,fry,frz;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
float fac;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const bool radgt=qq>1;
const float wqq1=(radgt? 2.0f-qq: qq);
const float wqq2=wqq1*wqq1;
const float wqq3=wqq2*wqq1;
wab=(radgt? CTE.cubic_a24*wqq3: CTE.cubic_a2*(1.0f-1.5f*wqq2+0.75f*wqq3));
fac=(radgt? CTE.cubic_c2*wqq2: (CTE.cubic_c1*qq+CTE.cubic_d1*wqq2))/rad;
//-Tensile correction.
float fab=wab*CTE.cubic_odwdeltap;
fab*=fab; fab*=fab; //fab=fab^4
prs+=fab*(devsp1.y+ prrhop2*(pospres2.w>0? 0.01f: -0.2f) );
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
const float wqq=2.f*qq+1.f;
const float wqq1=1.f-0.5f*qq;
const float wqq2=wqq1*wqq1;
wab=CTE.wendland_awen*wqq*wqq2*wqq2;
fac=CTE.wendland_bwen*qq*wqq2*wqq1/rad;
}
frx=fac*drx; fry=fac*dry; frz=fac*drz;
}
//===== Kernel Gradient Correction =====
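      //-The raw kernel gradient (frx,fry,frz) is multiplied by the symmetric correction matrix matkgcp1, which was inverted beforehand in KerInvertMatrixKgc.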
if(kgc==true){
float frx2=matkgcp1.xx*frx + matkgcp1.xy*fry + matkgcp1.xz*frz;
float fry2=matkgcp1.xy*frx + matkgcp1.yy*fry + matkgcp1.yz*frz;
float frz2=matkgcp1.xz*frx + matkgcp1.yz*fry + matkgcp1.zz*frz;
frx=frx2; fry=fry2; frz=frz2;
}
      {//===== Acceleration =====
const float p_vpm=-prs*massp2;
acep1.x+=p_vpm*frx; acep1.y+=p_vpm*fry; acep1.z+=p_vpm*frz;
}
float dvx=velp1.x-velrhop2.x, dvy=velp1.y-velrhop2.y, dvz=velp1.z-velrhop2.z;
float robar=(rhopp1+velrhop2.w)*0.5f;
{//===== Viscosity =====
const float dot=drx*dvx + dry*dvy + drz*dvz;
const float dot_rr2=dot/(rr2+CTE.eta2);
if(tvisco==VISCO_Artificial){//-Artificial viscosity.
if(dot<0){
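          //-Monaghan artificial viscosity: pi_visc=-visco*cbar*mubar/robar with mubar=h*dot/(rr2+eta2), applied only when the particles approach each other (dot<0).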
const float csound=velrhop2.w*OVERRHOPZERO; //const float csound=CTE.cs0*powf(rrhop*OVERRHOPZERO,3);
const float cbar=(devsp1.z+ CTE.cs0*(csound*csound*csound) )*0.5f;
const float amubar=CTE.h*dot_rr2; //float amubar=CTE.h*dot/(rr2+CTE.eta2);
const float pi_visc=(-CTE.visco*cbar*amubar/robar)*massp2;
acep1.x-=pi_visc*frx; acep1.y-=pi_visc*fry; acep1.z-=pi_visc*frz;
}
}
if(tvisco==VISCO_LaminarSPS){//-LaminarSPS viscosity.
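          //-Laminar viscous term: m2*4*visco*(dr.gradW)/((rr2+eta2)*(rho1+rho2)) applied to the velocity difference; the SPS sub-particle stress tau is added below.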
const float temp=2.0f*CTE.visco/((rr2+CTE.eta2)*robar);
const float vtemp=massp2*temp*(drx*frx+dry*fry+drz*frz);
acep1.x+=vtemp*dvx; acep1.y+=vtemp*dvy; acep1.z+=vtemp*dvz;
// SPS turbulence model.
tsymatrix3f tausum=taup1;
if(p2>CTE.nbound){
tausum=tau[p2-CTE.nbound];
tausum.xx+=taup1.xx;
tausum.xy+=taup1.xy;
tausum.xz+=taup1.xz;
tausum.yy+=taup1.yy;
tausum.yz+=taup1.yz;
tausum.zz+=taup1.zz;
}
acep1.x+=massp2*(tausum.xx*frx+tausum.xy*fry+tausum.xz*frz);
acep1.y+=massp2*(tausum.xy*frx+tausum.yy*fry+tausum.yz*frz);
acep1.z+=massp2*(tausum.xz*frx+tausum.yz*fry+tausum.zz*frz);
// CSPH terms.
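          //-Accumulates the SPH velocity-gradient components ((v2-v1) times gradW, weighted by m2/rho2) in csphp1; KerSPSCalcTau later builds the strain-rate tensor from them.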
const float volp2=-massp2/velrhop2.w;
float dv=dvx*volp2; csphp1.xx+=dv*frx; csphp1.xy+=dv*fry; csphp1.xz+=dv*frz;
dv=dvy*volp2; csphp1.xy+=dv*frx; csphp1.yy+=dv*fry; csphp1.yz+=dv*frz;
dv=dvz*volp2; csphp1.xz+=dv*frx; csphp1.yz+=dv*fry; csphp1.zz+=dv*frz;
}
visc=max(dot_rr2,visc); // reduction of viscdt will be performed on GPU.
}
//-Density derivative.
arp1+=massp2*(dvx*frx+dvy*fry+dvz*frz);
//-XSPH correction.
if(xsph){
const float wab_rhobar=massp2*(wab/robar);
vcor.x-=wab_rhobar * dvx;
vcor.y-=wab_rhobar * dvy;
vcor.z-=wab_rhobar * dvz;
}
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles using NeigsCell (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,TpVisco tvisco,bool kgc,bool xsph,unsigned ncellnv> __global__ void KerComputeForcesFullFluidNeigs
(unsigned pini,unsigned n,unsigned ncells,unsigned *cell,uint2 *cellnvb,uint2 *cellnvf,float4 *pospres,float4 *velrhop,unsigned *idp,float3 *ace,float *ar,float3 *velxcor,float *viscdt,const tsymatrix3f *matkgc,const tsymatrix3f *tau,tsymatrix3f *csph)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float arp1=0,visc=0;
float3 acep1=make_float3(0,0,0);
float3 vcor;
if(xsph)vcor=acep1;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
float rhopp1=r.w;
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const float csoun=rhopp1*OVERRHOPZERO;
float3 devsp1; devsp1.x=r.w/(rhopp1*rhopp1); devsp1.y=devsp1.x*(r.w>0? 0.01f: -0.2f); devsp1.z=CTE.cs0*(csoun*csoun*csoun);
tsymatrix3f matkgcp1=matkgc[p];
tsymatrix3f taup1=tau[p];
tsymatrix3f csphp1={0,0,0,0,0,0};
const int cel=cell[p];
//-Interaction with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesFullFluidBox<tkernel,tvisco,kgc,xsph> (p1,rg.x,rg.y,pospres,velrhop,idp,tau,CTE.massf,posp1,velp1,devsp1,rhopp1,matkgcp1,taup1,csphp1,acep1,vcor,arp1,visc);
}
//-Interaction with boundaries.
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvb[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesFullFluidBox<tkernel,tvisco,kgc,xsph> (p1,rg.x,rg.y,pospres,velrhop,idp,tau,CTE.massb,posp1,velp1,devsp1,rhopp1,matkgcp1,taup1,csphp1,acep1,vcor,arp1,visc);
}
//-Stores results.
if(arp1||acep1.x||acep1.y||acep1.z||visc){
ar[p1]+=arp1;
float3 r=ace[p1]; r.x+=acep1.x; r.y+=acep1.y; r.z+=acep1.z; ace[p1]=r;
if(xsph){ r=velxcor[p1]; r.x+=vcor.x; r.y+=vcor.y; r.z+=vcor.z; velxcor[p1]=r; }
if(visc>viscdt[p1])viscdt[p1]=visc;
if(tvisco==VISCO_LaminarSPS)csph[p]=csphp1;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,TpVisco tvisco,bool kgc,bool xsph,unsigned hdiv> __global__ void KerComputeForcesFullFluid
(unsigned pini,unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,unsigned *cell,int2 *cellbegb,int2 *cellbegf,float4 *pospres,float4 *velrhop,unsigned *idp,float3 *ace,float *ar,float3 *velxcor,float *viscdt,const tsymatrix3f *matkgc,const tsymatrix3f *tau,tsymatrix3f *csph)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float arp1=0,visc=0;
float3 acep1=make_float3(0,0,0);
float3 vcor;
if(xsph)vcor=acep1;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
float rhopp1=r.w;
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const float csoun=rhopp1*OVERRHOPZERO;
float3 devsp1; devsp1.x=r.w/(rhopp1*rhopp1); devsp1.y=devsp1.x*(r.w>0? 0.01f: -0.2f); devsp1.z=CTE.cs0*(csoun*csoun*csoun);
tsymatrix3f matkgcp1=matkgc[p];
tsymatrix3f taup1=tau[p];
tsymatrix3f csphp1={0,0,0,0,0,0};
const int cel=cell[p];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesFullFluidBox<tkernel,tvisco,kgc,xsph> (p1,pini,pfin,pospres,velrhop,idp,tau,CTE.massf,posp1,velp1,devsp1,rhopp1,matkgcp1,taup1,csphp1,acep1,vcor,arp1,visc);
}
}
//-Interaction with boundaries.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegb[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesFullFluidBox<tkernel,tvisco,kgc,xsph> (p1,pini,pfin,pospres,velrhop,idp,tau,CTE.massb,posp1,velp1,devsp1,rhopp1,matkgcp1,taup1,csphp1,acep1,vcor,arp1,visc);
}
}
//-Stores results.
if(arp1||acep1.x||acep1.y||acep1.z||visc){
ar[p1]+=arp1;
float3 r=ace[p1]; r.x+=acep1.x; r.y+=acep1.y; r.z+=acep1.z; ace[p1]=r;
if(xsph){ r=velxcor[p1]; r.x+=vcor.x; r.y+=vcor.y; r.z+=vcor.z; velxcor[p1]=r; }
if(visc>viscdt[p1])viscdt[p1]=visc;
if(tvisco==VISCO_LaminarSPS)csph[p]=csphp1;
}
}
}
//##############################################################################
//# Kernel: CELLMODE_Hneigs for Shepard density filter.
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles.
//------------------------------------------------------------------------------
template<TpKernel tkernel> __device__ void KerComputeForcesShepardBox(unsigned p1,const unsigned &pini,const unsigned &pfin,float massfluid,float4 *posrhop,float4 posrhop1,float &fdwabp1,float &fdrhopp1)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 posrhop2=posrhop[p2];
float drx=posrhop1.x-posrhop2.x;
float dry=posrhop1.y-posrhop2.y;
float drz=posrhop1.z-posrhop2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
float wab;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const bool radgt=qq>1;
const float wqq1=(radgt? 2.0f-qq: qq);
const float wqq2=wqq1*wqq1;
const float wqq3=wqq2*wqq1;
wab=(radgt? CTE.cubic_a24*wqq3: CTE.cubic_a2*(1.0f-1.5f*wqq2+0.75f*wqq3));
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
float wqq2=1.f-0.5f*qq; wqq2*=wqq2;
wab=CTE.wendland_awen*(2*qq+1)*wqq2*wqq2;
}
}
fdwabp1+=wab*(massfluid/posrhop2.w);
fdrhopp1+=wab;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction Shepard between fluid particles.
//------------------------------------------------------------------------------
template<TpKernel tkernel,unsigned ncellnv> __global__ void KerComputeForcesShepardNeigs(unsigned pini,unsigned n,unsigned ncells,unsigned *cell,uint2 *cellnvf,float massfluid,float4 *posrhop,float *fdrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float4 posrhop1=posrhop[p1];
float fdrhopp1=CTE.cteshepard;
float fdwabp1=fdrhopp1*(massfluid/posrhop1.w);
const int cel=cell[p];
//-Interaction with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesShepardBox <tkernel> (p1,rg.x,rg.y,massfluid,posrhop,posrhop1,fdwabp1,fdrhopp1);
}
//-Stores results.
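    //-Shepard-corrected density: rho_new = sum(m*Wab)/sum((m/rho)*Wab); both sums include the particle's own contribution initialised from cteshepard.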
fdrhop[p]=(fdrhopp1*massfluid)/fdwabp1;
}
}
//##############################################################################
//# Kernel: CELLMODE_2H & CELLMODE_H for Shepard density filter
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles
//------------------------------------------------------------------------------
template<TpKernel tkernel,unsigned hdiv> __global__ void KerComputeForcesShepard(unsigned pini,unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,unsigned *cell,int2 *cellbegf,float massfluid,float4 *posrhop,float *fdrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float4 posrhop1=posrhop[p1];
float fdrhopp1=CTE.cteshepard;
float fdwabp1=fdrhopp1*(massfluid/posrhop1.w);
const int cel=cell[p];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesShepardBox <tkernel> (p1,pini,pfin,massfluid,posrhop,posrhop1,fdwabp1,fdrhopp1);
}
}
//-Stores results.
fdrhop[p]=(fdrhopp1*massfluid)/fdwabp1;
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that makes ace[].y equal to zero.
//------------------------------------------------------------------------------
__global__ void KerResetAcey(unsigned n,unsigned npb,float3 *ace)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n)ace[p+npb].y=0;
}
//##############################################################################
//# Kernel: CELLMODE_Hneigs for MatrixKGC when kernel gradient correction.
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles
//------------------------------------------------------------------------------
template<TpKernel tkernel> __device__ void KerComputeMatrixKgcBox(unsigned p1,const unsigned &pini,const unsigned &pfin,float massp2,const float4 *posrhop,const float3 &posp1,tsymatrix3f &matkgcp1)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 posrhopp2=posrhop[p2];
float drx=posp1.x-posrhopp2.x;
float dry=posp1.y-posrhopp2.y;
float drz=posp1.z-posrhopp2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
float frx,fry,frz;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
float fac;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const float wqq1=2.0f-qq;
fac=(qq>1? CTE.cubic_c2*(wqq1*wqq1): (CTE.cubic_c1+CTE.cubic_d1*qq)*qq) /rad;
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
const float wqq1=1.f-0.5f*qq;
fac=CTE.wendland_bwen*qq*wqq1*wqq1*wqq1/rad;
}
frx=fac*drx; fry=fac*dry; frz=fac*drz;
}
//===== KGC Matrix =====
const float volp2=-massp2/posrhopp2.w;
float fr=frx*volp2; matkgcp1.xx+=fr*drx; matkgcp1.xy+=fr*dry; matkgcp1.xz+=fr*drz;
fr=fry*volp2; matkgcp1.xy+=fr*drx; matkgcp1.yy+=fr*dry; matkgcp1.yz+=fr*drz;
fr=frz*volp2; matkgcp1.xz+=fr*drx; matkgcp1.yz+=fr*dry; matkgcp1.zz+=fr*drz;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles using NeigsCell.
//------------------------------------------------------------------------------
template<TpKernel tkernel,unsigned ncellnv> __global__ void KerComputeMatrixKgcNeigs
(unsigned pini,unsigned n,unsigned ncells,const unsigned *cell,const uint2 *cellnvb,const uint2 *cellnvf,const float4 *posrhop,float massb,float massf,tsymatrix3f *matkgc)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
tsymatrix3f matkgcp1={0,0,0,0,0,0};
float4 r=posrhop[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const int cel=cell[p];
//-Interaction with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeMatrixKgcBox<tkernel> (p1,rg.x,rg.y,massf,posrhop,posp1,matkgcp1);
}
//-Interaction with boundaries.
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvb[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeMatrixKgcBox<tkernel> (p1,rg.x,rg.y,massb,posrhop,posp1,matkgcp1);
}
//-Stores results.
matkgc[p]=matkgcp1;
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,unsigned hdiv> __global__ void KerComputeMatrixKgc
(unsigned pini,unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,const unsigned *cell,const int2 *cellbegb,const int2 *cellbegf,const float4 *posrhop,float massb,float massf,tsymatrix3f *matkgc)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
tsymatrix3f matkgcp1={0,0,0,0,0,0};
float4 r=posrhop[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const int cel=cell[p];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeMatrixKgcBox<tkernel> (p1,pini,pfin,massf,posrhop,posp1,matkgcp1);
}
}
//-Interaction with boundaries.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegb[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeMatrixKgcBox<tkernel> (p1,pini,pfin,massb,posrhop,posp1,matkgcp1);
}
}
//-Stores results.
matkgc[p]=matkgcp1;
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that prepares variables for interaction MatrixKgc.
//------------------------------------------------------------------------------
__global__ void KerPreInteraction_MatrixKgc(unsigned np,const float3 *pos,const float *rhop,float4 *posrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<np){
float3 rpos=pos[p];
posrhop[p]=make_float4(rpos.x,rpos.y,rpos.z,rhop[p]);
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that inverts MatrixKgc.
//------------------------------------------------------------------------------
template<bool simula2d> __global__ void KerInvertMatrixKgc(unsigned npf,tsymatrix3f *matkgc)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<npf){
tsymatrix3f mat=matkgc[p];
//-Matrix in case of 2D simulation.
if(simula2d)mat.yy=1.0f;
//-Inversion of a symmetric matrix.
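    //-The off-diagonal entries were accumulated twice (once per row) in KerComputeMatrixKgcBox, so they are halved before inverting the symmetric matrix through its adjugate and determinant.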
mat.xy*=0.5f;
mat.xz*=0.5f;
mat.yz*=0.5f;
float det=mat.xx*mat.yy*mat.zz + 2.f*mat.xy*mat.yz*mat.xz - mat.xz*mat.yy*mat.xz - mat.xx*mat.yz*mat.yz - mat.xy*mat.xy*mat.zz; //-Determinant of the matrix.
//-Matrix inversion.
tsymatrix3f invmat={1,0,0 ,1,0 ,1}; //-Matrix when no correction will be applied.
if(fabs(det)>0.01 && fabs(mat.xx)>MATRIXKGC_CONTROL && fabs(mat.yy)>MATRIXKGC_CONTROL && fabs(mat.zz)>MATRIXKGC_CONTROL){
invmat.xx=(mat.yy*mat.zz-mat.yz*mat.yz)/det;
invmat.xy=(mat.xz*mat.yz-mat.xy*mat.zz)/det;
invmat.xz=(mat.xy*mat.yz-mat.yy*mat.xz)/det;
invmat.yy=(mat.xx*mat.zz-mat.xz*mat.xz)/det;
invmat.yz=(mat.xy*mat.xz-mat.xx*mat.yz)/det;
invmat.zz=(mat.xx*mat.yy-mat.xy*mat.xy)/det;
}
matkgc[p]=invmat;
}
}
//==============================================================================
/// Computes matrix of kernel gradient correction (KGC).
//==============================================================================
template<TpKernel tkernel,unsigned ncellnv> void CsInteraction_MatrixKgc(StDeviceContext *dc,StDeviceCte *cte){
TmgStart(dc->timers,TMG_CfMatrixKgc);
hipMemset(dc->matkgc,0,sizeof(tsymatrix3f)*dc->npf); //matkgc[]=0
dim3 sgrid=CsGetGridSize(dc->npok,BLOCKSIZE);
hipLaunchKernelGGL(( KerPreInteraction_MatrixKgc) , dim3(sgrid),dim3(BLOCKSIZE), 0, 0, dc->npok,dc->pos,dc->rhop,dc->pospres);
switch(dc->cellmode){
case CELLMODE_Hneigs:{
//-Interaction Fluid-Fluid & Fluid-Boundary.
unsigned bsize=dc->bsmatrixkgc;
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
hipLaunchKernelGGL(( KerComputeMatrixKgcNeigs<tkernel,ncellnv>) , dim3(sgridf),dim3(bsize), 0, 0, dc->npb,dc->npf,dc->nctotmax-1,dc->cellf,dc->cellnvb,dc->cellnvf,dc->pospres,cte->massb,cte->massf,dc->matkgc);
}break;
case CELLMODE_2H:
case CELLMODE_H:{
//-Interaction Fluid-Fluid & Fluid-Boundary.
unsigned bsize=dc->bsmatrixkgc;
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
hipLaunchKernelGGL(( KerComputeMatrixKgc<tkernel,(ncellnv==9? 1: 2)>) , dim3(sgridf),dim3(bsize), 0, 0, dc->npb,dc->npf,dc->ncx,dc->ncy,dc->ncz,dc->cellf,dc->cellbegb,dc->cellbegf,dc->pospres,cte->massb,cte->massf,dc->matkgc);
}break;
default: throw std::string("CsInteraction_MatrixKgc> CellMode unrecognised.");
}
//-Inversion of a matrix matkgc.
  if(dc->simulate2d)hipLaunchKernelGGL(( KerInvertMatrixKgc<true>) , dim3(sgrid),dim3(BLOCKSIZE), 0, 0, dc->npf,dc->matkgc);
  else hipLaunchKernelGGL(( KerInvertMatrixKgc<false>) , dim3(sgrid),dim3(BLOCKSIZE), 0, 0, dc->npf,dc->matkgc);
CheckErrorCuda("Failed while executing kernels of MatrixKgc interaction.");
TmgStop(dc->timers,TMG_CfMatrixKgc);
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that prepares variables for force computation.
//------------------------------------------------------------------------------
__global__ void KerPreInteraction_Forces(unsigned n,unsigned npb,float gamma,float b,float3 gravity,const float3 *pos,const float3 *vel,const float *rhop,float *csound,float3 *ace,float4 *pospres,float4 *velrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
float rrhop=rhop[p];
float press=b*(powf(rrhop*OVERRHOPZERO,gamma)-1.0f);
float csoun=rrhop*OVERRHOPZERO;
csound[p]=CTE.cs0*(csoun*csoun*csoun);
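    //-press follows the Tait equation of state p=b*((rho/rho0)^gamma-1); csound is the particle sound speed cs0*(rho/rho0)^3, which corresponds to gamma=7.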
float3 r=pos[p]; pospres[p]=make_float4(r.x,r.y,r.z,press);
r=vel[p]; velrhop[p]=make_float4(r.x,r.y,r.z,rrhop);
ace[p]=(p<npb? make_float3(0,0,0): gravity);
}
}
//==============================================================================
/// Prepares variables for force computation.
//==============================================================================
void CsPreInteraction_Forces(StDeviceContext *dc,bool xsph){
TmgStart(dc->timers,TMG_CfPreForces);
hipMemset(dc->ar,0,sizeof(float)*dc->npok); //ar[]=0
if(xsph)hipMemset(dc->velxcor,0,sizeof(float3)*dc->npok); //velxcor[]=0
if(dc->tvisco==VISCO_LaminarSPS)hipMemset(dc->csph,0,sizeof(tsymatrix3f)*(dc->npf)); //csph[]={0,..,0}
hipMemset(dc->viscdt,0,sizeof(float)*dc->npok);
dim3 sgrid=CsGetGridSize(dc->npok,BLOCKSIZE);
hipLaunchKernelGGL(( KerPreInteraction_Forces) , dim3(sgrid),dim3(BLOCKSIZE), 0, 0, dc->npok,dc->npb,dc->gamma,dc->b,dc->gravity,dc->pos,dc->vel,dc->rhop,dc->csound,dc->ace,dc->pospres,dc->velrhop);
CheckErrorCuda("Failed preparing vars for interaction forces.");
TmgStop(dc->timers,TMG_CfPreForces);
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes sub-particle stress tensor (Tau) for SPS turbulence model.
//------------------------------------------------------------------------------
__global__ void KerSPSCalcTau(unsigned n,unsigned npb,float smag,float blin,const float *rhop,const tsymatrix3f *csph,tsymatrix3f *tau)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
tsymatrix3f rcsph=csph[p];
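    //-csph[] holds the velocity-gradient terms accumulated during the force interaction; visc_SPS=smag*sqrt(2*S:S) is the Smagorinsky eddy viscosity (smag is assumed to hold (Cs*dl)^2 and blin the Blin backscatter constant of the SPS model).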
const float pow1=rcsph.xx*rcsph.xx + rcsph.yy*rcsph.yy + rcsph.zz*rcsph.zz;
const float prr=pow1+pow1 + rcsph.xy*rcsph.xy + rcsph.xz*rcsph.xz + rcsph.yz*rcsph.yz;
const float visc_SPS=smag*sqrt(prr);
const float div_u=rcsph.xx+rcsph.yy+rcsph.zz;
const float sps_k=(2.0f/3.0f)*visc_SPS*div_u;
const float sps_Blin=blin*prr;
const float sumsps=-(sps_k+sps_Blin);
const float twovisc_SPS=(visc_SPS+visc_SPS);
const float one_rho2 = 1.0f/rhop[p+npb];
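    //-Note that one_rho2 evaluates to 1/rho here, so the tau components stored below are scaled by the inverse density.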
tsymatrix3f rtau;
rtau.xx=one_rho2*(twovisc_SPS*rcsph.xx +sumsps);
rtau.xy=one_rho2*(visc_SPS*rcsph.xy);
rtau.xz=one_rho2*(visc_SPS*rcsph.xz);
rtau.yy=one_rho2*(twovisc_SPS*rcsph.yy +sumsps);
rtau.yz=one_rho2*(visc_SPS*rcsph.yz);
rtau.zz=one_rho2*(twovisc_SPS*rcsph.zz +sumsps);
tau[p]=rtau;
}
}
//==============================================================================
/// Interaction between particles.
//==============================================================================
template<bool xsph,TpKernel tkernel,TpVisco tvisco,bool kgc,bool floating,unsigned ncellnv> void CsInteraction_Forces(StDeviceContext *dc,StDeviceCte *cte){
//-Interaction to compute matrix for Kernel Gradient Correction.
if(dc->kgc)CsInteraction_MatrixKgc<tkernel,ncellnv>(dc,cte);
//-Interaction Forces
CsPreInteraction_Forces(dc,xsph);
switch(dc->cellmode){
case CELLMODE_Hneigs:{
//-Interaction Fluid-Fluid & Fluid-Boundary.
unsigned bsize=(xsph? dc->bsforcesfluid: dc->bsforcesfluidcorr);
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
TmgStart(dc->timers,TMG_CfForcesFluid);
      if(kgc||tvisco==VISCO_LaminarSPS)hipLaunchKernelGGL(( KerComputeForcesFullFluidNeigs<tkernel,tvisco,kgc,xsph,ncellnv>) , dim3(sgridf),dim3(bsize), 0, 0, dc->npb,dc->npf,dc->nctotmax-1,dc->cellf,dc->cellnvb,dc->cellnvf,dc->pospres,dc->velrhop,dc->idp,dc->ace,dc->ar,dc->velxcor,dc->viscdt,dc->matkgc,dc->tau,dc->csph);
      else hipLaunchKernelGGL(( KerComputeForcesFluidNeigs<tkernel,floating,xsph,ncellnv>) , dim3(sgridf),dim3(bsize), 0, 0, dc->npb,dc->npf,dc->nctotmax-1,dc->cellf,dc->cellnvb,dc->cellnvf,dc->pospres,dc->velrhop,dc->idp,dc->ace,dc->ar,dc->velxcor,dc->viscdt);
TmgStop(dc->timers,TMG_CfForcesFluid);
//-Interaction Boundary-Fluid.
bsize=dc->bsforcesbound;
dim3 sgridb=CsGetGridSize(dc->npb,bsize);
TmgStart(dc->timers,TMG_CfForcesBound);
hipLaunchKernelGGL(( KerComputeForcesBoundNeigs<tkernel,floating,ncellnv>) , dim3(sgridb),dim3(bsize), 0, 0, dc->npb,dc->nctotmax-1,dc->cellb,dc->cellnvf,dc->pospres,dc->velrhop,dc->idp,dc->ar,dc->viscdt);
TmgStop(dc->timers,TMG_CfForcesBound);
}break;
case CELLMODE_2H:
case CELLMODE_H:{
//-Interaction Fluid-Fluid & Fluid-Boundary.
unsigned bsize=(xsph? dc->bsforcesfluid: dc->bsforcesfluidcorr);
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
TmgStart(dc->timers,TMG_CfForcesFluid);
      if(kgc||tvisco==VISCO_LaminarSPS)hipLaunchKernelGGL(( KerComputeForcesFullFluid<tkernel,tvisco,kgc,xsph,(ncellnv==9? 1: 2)>) , dim3(sgridf),dim3(bsize), 0, 0, dc->npb,dc->npf,dc->ncx,dc->ncy,dc->ncz,dc->cellf,dc->cellbegb,dc->cellbegf,dc->pospres,dc->velrhop,dc->idp,dc->ace,dc->ar,dc->velxcor,dc->viscdt,dc->matkgc,dc->tau,dc->csph);
      else hipLaunchKernelGGL(( KerComputeForcesFluid<tkernel,floating,xsph,(ncellnv==9? 1: 2)>) , dim3(sgridf),dim3(bsize), 0, 0, dc->npb,dc->npf,dc->ncx,dc->ncy,dc->ncz,dc->cellf,dc->cellbegb,dc->cellbegf,dc->pospres,dc->velrhop,dc->idp,dc->ace,dc->ar,dc->velxcor,dc->viscdt);
TmgStop(dc->timers,TMG_CfForcesFluid);
//-Interaction Boundary-Fluid.
bsize=dc->bsforcesbound;
dim3 sgridb=CsGetGridSize(dc->npb,bsize);
TmgStart(dc->timers,TMG_CfForcesBound);
hipLaunchKernelGGL(( KerComputeForcesBound<tkernel,floating,(ncellnv==9? 1: 2)>) , dim3(sgridb),dim3(bsize), 0, 0, dc->npb,dc->ncx,dc->ncy,dc->ncz,dc->cellb,dc->cellbegf,dc->pospres,dc->velrhop,dc->idp,dc->ar,dc->viscdt);
TmgStop(dc->timers,TMG_CfForcesBound);
}break;
default: throw std::string("CsInteraction_Forces> CellMode unrecognised.");
}
//-Computes values of tau[] starting from csph[].
if(tvisco==VISCO_LaminarSPS){
dim3 sgridf=CsGetGridSize(dc->npf,BLOCKSIZE);
hipLaunchKernelGGL(( KerSPSCalcTau) , dim3(sgridf),dim3(BLOCKSIZE), 0, 0, dc->npf,dc->npb,dc->smag,dc->blin,dc->rhop,dc->csph,dc->tau);
}
//-Cancels forces in Y direction when 2D case.
if(dc->simulate2d){
dim3 sgrid=CsGetGridSize(dc->npf,BLOCKSIZE);
hipLaunchKernelGGL(( KerResetAcey) , dim3(sgrid),dim3(BLOCKSIZE), 0, 0, dc->npf,dc->npb,dc->ace);
}
CheckErrorCuda("Failed while executing kernels of interaction forces.");
}
//==============================================================================
/// Calls CsInteraction_Forces().
//==============================================================================
template<bool xsph,TpKernel tkernel,TpVisco tvisco,bool kgc,bool floating>void CsCallInteraction_Forces3(StDeviceContext *dc,StDeviceCte *cte){
if(dc->ncellnv==9)CsInteraction_Forces<xsph,tkernel,tvisco,kgc,floating,9>(dc,cte);
else if(dc->ncellnv==25)CsInteraction_Forces<xsph,tkernel,tvisco,kgc,floating,25>(dc,cte);
}
//==============================================================================
template<bool xsph,TpKernel tkernel,TpVisco tvisco,bool floating>void CsCallInteraction_Forces2(StDeviceContext *dc,StDeviceCte *cte){
if(dc->kgc)CsCallInteraction_Forces3<xsph,tkernel,tvisco,true,floating>(dc,cte);
else CsCallInteraction_Forces3<xsph,tkernel,tvisco,false,floating>(dc,cte);
}
//==============================================================================
template<bool xsph,TpKernel tkernel,bool floating>void CsCallInteraction_Forces1(StDeviceContext *dc,StDeviceCte *cte){
if(dc->tvisco==VISCO_Artificial)CsCallInteraction_Forces2<xsph,tkernel,VISCO_Artificial,floating>(dc,cte);
else if(dc->tvisco==VISCO_LaminarSPS)CsCallInteraction_Forces2<xsph,tkernel,VISCO_LaminarSPS,floating>(dc,cte);
}
//==============================================================================
template<bool xsph,bool floating>void CsCallInteraction_Forces(StDeviceContext *dc,StDeviceCte *cte){
if(dc->tkernel==KERNEL_Cubic)CsCallInteraction_Forces1<xsph,KERNEL_Cubic,floating>(dc,cte);
else if(dc->tkernel==KERNEL_Wendland)CsCallInteraction_Forces1<xsph,KERNEL_Wendland,floating>(dc,cte);
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that prepares variables for Shepard interaction.
//------------------------------------------------------------------------------
template<bool floating> __global__ void KerPreShepard(unsigned pini,unsigned npf,float3 *pos,float *rhop,unsigned *idp,unsigned nbound,float ftposx,float4 *posrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<npf){
p+=pini;
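    //-Particles of floating bodies are relocated to ftposx (a point placed outside the domain by the caller) so that they are effectively excluded from the Shepard interaction.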
float3 rpos=(floating&&idp[p]<nbound? make_float3(ftposx,0,0): pos[p]);
posrhop[p]=make_float4(rpos.x,rpos.y,rpos.z,rhop[p]);
}
}
//==============================================================================
/// Applies Shepard density filter.
//==============================================================================
template<TpKernel tkernel,bool floating,unsigned ncellnv> void CsRunShepard(StDeviceContext *dc,StDeviceCte *cte){
TmgStart(dc->timers,TMG_CfShepard);
dim3 sgrid=CsGetGridSize(dc->npf,BLOCKSIZE);
hipLaunchKernelGGL(( KerPreShepard<floating>) , dim3(sgrid),dim3(BLOCKSIZE), 0, 0, dc->npb,dc->npf,dc->pos,dc->rhop,dc->idp,dc->nbound,dc->posmin.x-(cte->h*2),dc->pospres);
//-Shepard interaction.
const unsigned bsize=dc->bsshepard;
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
switch(dc->cellmode){
case CELLMODE_Hneigs:
hipLaunchKernelGGL(( KerComputeForcesShepardNeigs<tkernel,ncellnv>) , dim3(sgridf),dim3(bsize), 0, 0, dc->npb,dc->npf,dc->nctotmax-1,dc->cellf,dc->cellnvf,cte->massf,dc->pospres,dc->fdrhop);
break;
case CELLMODE_2H:
case CELLMODE_H:
hipLaunchKernelGGL(( KerComputeForcesShepard<tkernel,(ncellnv==9? 1: 2)>) , dim3(sgridf),dim3(bsize), 0, 0, dc->npb,dc->npf,dc->ncx,dc->ncy,dc->ncz,dc->cellf,dc->cellbegf,cte->massf,dc->pospres,dc->fdrhop);
break;
default: throw std::string("CsRunShepard> CellMode unrecognised.");
}
CheckErrorCuda("Failed while executing kernel Shepard.");
//-Applies new density values.
hipMemcpy(dc->rhop+dc->npb,dc->fdrhop,sizeof(float)*dc->npf,hipMemcpyDeviceToDevice);
TmgStop(dc->timers,TMG_CfShepard);
}
//==============================================================================
/// Calls CsRunShepard().
//==============================================================================
template<TpKernel tkernel,bool floating> void CsCallRunShepard3(StDeviceContext *dc,StDeviceCte *cte){
if(dc->ncellnv==9)CsRunShepard<tkernel,floating,9>(dc,cte);
else if(dc->ncellnv==25)CsRunShepard<tkernel,floating,25>(dc,cte);
}
//==============================================================================
template<TpKernel tkernel> void CsCallRunShepard2(StDeviceContext *dc,StDeviceCte *cte){
if(dc->nfloat)CsCallRunShepard3<tkernel,true>(dc,cte);
else CsCallRunShepard3<tkernel,false>(dc,cte);
}
//==============================================================================
void CsCallRunShepard(StDeviceContext *dc,StDeviceCte *cte){
if(dc->tkernel==KERNEL_Cubic)CsCallRunShepard2<KERNEL_Cubic>(dc,cte);
else if(dc->tkernel==KERNEL_Wendland)CsCallRunShepard2<KERNEL_Wendland>(dc,cte);
}
|
be5cfe39eddc510996866884ea19b6f317ca7fd9.cu
|
/*
<DUALSPHYSICS> Copyright (C) 2012 by Jose M. Dominguez, Dr. Alejandro Crespo, Prof. M. Gomez Gesteira, Anxo Barreiro, Dr. Benedict Rogers.
EPHYSLAB Environmental Physics Laboratory, Universidade de Vigo, Ourense, Spain.
School of Mechanical, Aerospace and Civil Engineering, University of Manchester, Manchester, U.K.
This file is part of DualSPHysics.
DualSPHysics is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
DualSPHysics is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License, along with DualSPHysics. If not, see <http://www.gnu.org/licenses/>.
*/
/// \file CudaSphFC.cu \brief Implements all the functions to compute forces on GPU.
//##############################################################################
//# Kernels for force computation (including Floating objects).
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,bool xsph> __device__ void KerComputeForcesFluidBox
(unsigned p1,const unsigned &pini,const unsigned &pfin,float4 *pospres,float4 *velrhop,unsigned *idp,float massp2,float massftp1,float3 posp1,float3 velp1,float3 devsp1,float rhopp1,float3 &acep1,float3 &vcor,float &arp1,float &visc)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 pospres2=pospres[p2];
float drx=posp1.x-pospres2.x;
float dry=posp1.y-pospres2.y;
float drz=posp1.z-pospres2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
const float4 velrhop2=velrhop[p2];
const float prrhop2=pospres2.w/(velrhop2.w*velrhop2.w);
float prs=devsp1.x+prrhop2;
float wab,frx,fry,frz;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
float fac;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const bool radgt=qq>1;
const float wqq1=(radgt? 2.0f-qq: qq);
const float wqq2=wqq1*wqq1;
const float wqq3=wqq2*wqq1;
wab=(radgt? CTE.cubic_a24*wqq3: CTE.cubic_a2*(1.0f-1.5f*wqq2+0.75f*wqq3));
fac=(radgt? CTE.cubic_c2*wqq2: (CTE.cubic_c1*qq+CTE.cubic_d1*wqq2))/rad;
//-Tensile correction.
float fab=wab*CTE.cubic_odwdeltap;
fab*=fab; fab*=fab; //fab=fab^4
prs+=fab*(devsp1.y+ prrhop2*(pospres2.w>0? 0.01f: -0.2f) );
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
const float wqq=2.f*qq+1.f;
const float wqq1=1.f-0.5f*qq;
const float wqq2=wqq1*wqq1;
wab=CTE.wendland_awen*wqq*wqq2*wqq2;
fac=CTE.wendland_bwen*qq*wqq2*wqq1/rad;
}
frx=fac*drx; fry=fac*dry; frz=fac*drz;
}
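      //-With floating bodies the neighbour mass is resolved per particle: indices/ids in the boundary range take massb and the rest massf (the massp2 argument is passed as 0 in that case).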
if(floating)massp2=(p2<CTE.nbound||idp[p2]<CTE.nbound? CTE.massb: CTE.massf);
      {//===== Acceleration =====
const float p_vpm=-prs*(floating? massp2*massftp1: massp2);
acep1.x+=p_vpm*frx; acep1.y+=p_vpm*fry; acep1.z+=p_vpm*frz;
}
float dvx=velp1.x-velrhop2.x, dvy=velp1.y-velrhop2.y, dvz=velp1.z-velrhop2.z;
float robar=(rhopp1+velrhop2.w)*0.5f;
{//===== Viscosity =====
const float dot=drx*dvx + dry*dvy + drz*dvz;
const float dot_rr2=dot/(rr2+CTE.eta2);
{//-Artificial viscosity
if(dot<0){
const float csound=velrhop2.w*OVERRHOPZERO; //const float csound=CTE.cs0*powf(rrhop*OVERRHOPZERO,3);
const float cbar=(devsp1.z+ CTE.cs0*(csound*csound*csound) )*0.5f;
const float amubar=CTE.h*dot_rr2; //float amubar=CTE.h*dot/(rr2+CTE.eta2);
const float pi_visc=(-CTE.visco*cbar*amubar/robar)*massp2;
acep1.x-=pi_visc*frx; acep1.y-=pi_visc*fry; acep1.z-=pi_visc*frz;
}
}
visc=max(dot_rr2,visc); //reduction of viscdt will be performed on GPU.
}
//-Density derivative.
arp1+=massp2*(dvx*frx+dvy*fry+dvz*frz);
//-XSPH correction.
if(xsph){
const float wab_rhobar=massp2*(wab/robar);
vcor.x-=wab_rhobar * dvx;
vcor.y-=wab_rhobar * dvy;
vcor.z-=wab_rhobar * dvz;
}
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles (Bound-Fluid).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating> __device__ void KerComputeForcesBoundBox
(unsigned p1,const unsigned &pini,const unsigned &pfin,float4 *pospres,float4 *velrhop,unsigned* idp,float massf,float3 posp1,float3 velp1,float &arp1,float &visc)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 pospres2=pospres[p2];
float drx=posp1.x-pospres2.x;
float dry=posp1.y-pospres2.y;
float drz=posp1.z-pospres2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
const float4 velrhop2=velrhop[p2];
float frx,fry,frz;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
float fac;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const bool radgt=qq>1;
float wqq2=(radgt? 2.0f-qq: qq); wqq2*=wqq2;
fac=(radgt? CTE.cubic_c2*wqq2: (CTE.cubic_c1*qq+CTE.cubic_d1*wqq2))/rad;
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
const float wqq1=1.f-0.5f*qq;
fac=CTE.wendland_bwen*qq*wqq1*wqq1*wqq1/rad;
}
frx=fac*drx; fry=fac*dry; frz=fac*drz;
}
float dvx=velp1.x-velrhop2.x, dvy=velp1.y-velrhop2.y, dvz=velp1.z-velrhop2.z;
{//===== Viscosity =====
const float dot=drx*dvx + dry*dvy + drz*dvz;
const float dot_rr2=dot/(rr2+CTE.eta2);
visc=max(dot_rr2,visc); // reduction of viscdt will be performed on GPU.
}
//-Density derivative.
if(floating)arp1+=(idp[p2]<CTE.nbound? CTE.massb: CTE.massf)*(dvx*frx+dvy*fry+dvz*frz);
else arp1+=massf*(dvx*frx+dvy*fry+dvz*frz);
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles using NeigsCell (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,bool xsph,unsigned ncellnv> __global__ void KerComputeForcesFluidNeigs
(unsigned pini,unsigned n,unsigned ncells,unsigned *cell,uint2 *cellnvb,uint2 *cellnvf,float4 *pospres,float4 *velrhop,unsigned *idp,float3 *ace,float *ar,float3 *velxcor,float *viscdt)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float arp1=0,visc=0;
float3 acep1=make_float3(0,0,0);
float3 vcor;
if(xsph)vcor=acep1;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
float rhopp1=r.w;
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const float csoun=rhopp1*OVERRHOPZERO;
float3 devsp1; devsp1.x=r.w/(rhopp1*rhopp1); devsp1.y=devsp1.x*(r.w>0? 0.01f: -0.2f); devsp1.z=CTE.cs0*(csoun*csoun*csoun);
float massftp1=(floating? (idp[p1]<CTE.nbound? CTE.massb: 1.f): 0);
const int cel=cell[p];
//-Interaction with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesFluidBox<tkernel,floating,xsph> (p1,rg.x,rg.y,pospres,velrhop,idp,(floating? 0: CTE.massf),massftp1,posp1,velp1,devsp1,rhopp1,acep1,vcor,arp1,visc);
}
//-Interaction with boundaries.
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvb[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesFluidBox<tkernel,floating,xsph> (p1,rg.x,rg.y,pospres,velrhop,idp,(floating? 0: CTE.massb),massftp1,posp1,velp1,devsp1,rhopp1,acep1,vcor,arp1,visc);
}
//-Stores results.
if(arp1||acep1.x||acep1.y||acep1.z||visc){
ar[p1]+=arp1;
float3 r=ace[p1]; r.x+=acep1.x; r.y+=acep1.y; r.z+=acep1.z; ace[p1]=r;
if(xsph){
r=velxcor[p1]; r.x+=vcor.x; r.y+=vcor.y; r.z+=vcor.z; velxcor[p1]=r;
}
if(visc>viscdt[p1])viscdt[p1]=visc;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles using NeigsCell (Bound-Fluid).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,unsigned ncellnv> __global__ void KerComputeForcesBoundNeigs
(unsigned n,unsigned ncells,unsigned *cell,uint2 *cellnvf,float4 *pospres,float4 *velrhop,unsigned *idp,float *ar,float *viscdt)
{
unsigned p1=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p1<n){
float arp1=0,visc=0;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const int cel=cell[p1];
//-Interaction of boundary with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesBoundBox<tkernel,floating>(p1,rg.x,rg.y,pospres,velrhop,idp,(floating? 0: CTE.massf),posp1,velp1,arp1,visc);
}
//-Stores results.
if(arp1||visc){
ar[p1]+=arp1;
if(visc>viscdt[p1])viscdt[p1]=visc;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,bool xsph,unsigned hdiv> __global__ void KerComputeForcesFluid
(unsigned pini,unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,unsigned *cell,int2 *cellbegb,int2 *cellbegf,float4 *pospres,float4 *velrhop,unsigned *idp,float3 *ace,float *ar,float3 *velxcor,float *viscdt)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float arp1=0,visc=0;
float3 acep1=make_float3(0,0,0);
float3 vcor;
if(xsph)vcor=acep1;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
float rhopp1=r.w;
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const float csoun=rhopp1*OVERRHOPZERO;
float3 devsp1; devsp1.x=r.w/(rhopp1*rhopp1); devsp1.y=devsp1.x*(r.w>0? 0.01f: -0.2f); devsp1.z=CTE.cs0*(csoun*csoun*csoun);
float massftp1=(floating? (idp[p1]<CTE.nbound? CTE.massb: 1.f): 0);
const int cel=cell[p];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesFluidBox<tkernel,floating,xsph> (p1,pini,pfin,pospres,velrhop,idp,(floating? 0: CTE.massf),massftp1,posp1,velp1,devsp1,rhopp1,acep1,vcor,arp1,visc);
}
}
//-Interaction with boundaries.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegb[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesFluidBox<tkernel,floating,xsph> (p1,pini,pfin,pospres,velrhop,idp,(floating? 0: CTE.massb),massftp1,posp1,velp1,devsp1,rhopp1,acep1,vcor,arp1,visc);
}
}
//-Stores results.
if(arp1||acep1.x||acep1.y||acep1.z||visc){
ar[p1]+=arp1;
float3 r=ace[p1]; r.x+=acep1.x; r.y+=acep1.y; r.z+=acep1.z; ace[p1]=r;
if(xsph){
r=velxcor[p1]; r.x+=vcor.x; r.y+=vcor.y; r.z+=vcor.z; velxcor[p1]=r;
}
if(visc>viscdt[p1])viscdt[p1]=visc;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles (Bound-Fluid).
//------------------------------------------------------------------------------
template<TpKernel tkernel,bool floating,unsigned hdiv> __global__ void KerComputeForcesBound(unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,unsigned *cell,int2 *cellbegf,float4 *pospres,float4 *velrhop,unsigned *idp,float *ar,float *viscdt)
{
unsigned p1=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p1<n){
float arp1=0,visc=0;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const int cel=cell[p1];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction of boundary with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesBoundBox<tkernel,floating>(p1,pini,pfin,pospres,velrhop,idp,(floating? 0: CTE.massf),posp1,velp1,arp1,visc);
}
}
//-Stores results.
if(arp1||visc){
ar[p1]+=arp1;
if(visc>viscdt[p1])viscdt[p1]=visc;
}
}
}
//##############################################################################
//# Kernels for force computation when using Laminar+SPS viscosity and KGC.
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,TpVisco tvisco,bool kgc,bool xsph> __device__ void KerComputeForcesFullFluidBox
(unsigned p1,const unsigned &pini,const unsigned &pfin,float4 *pospres,float4 *velrhop,unsigned *idp,const tsymatrix3f *tau,float massp2,float3 posp1,float3 velp1,float3 devsp1,float rhopp1,const tsymatrix3f &matkgcp1,const tsymatrix3f &taup1,tsymatrix3f &csphp1,float3 &acep1,float3 &vcor,float &arp1,float &visc)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 pospres2=pospres[p2];
float drx=posp1.x-pospres2.x;
float dry=posp1.y-pospres2.y;
float drz=posp1.z-pospres2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
const float4 velrhop2=velrhop[p2];
const float prrhop2=pospres2.w/(velrhop2.w*velrhop2.w);
float prs=devsp1.x+prrhop2;
float wab,frx,fry,frz;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
float fac;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const bool radgt=qq>1;
const float wqq1=(radgt? 2.0f-qq: qq);
const float wqq2=wqq1*wqq1;
const float wqq3=wqq2*wqq1;
wab=(radgt? CTE.cubic_a24*wqq3: CTE.cubic_a2*(1.0f-1.5f*wqq2+0.75f*wqq3));
fac=(radgt? CTE.cubic_c2*wqq2: (CTE.cubic_c1*qq+CTE.cubic_d1*wqq2))/rad;
//-Tensile correction.
float fab=wab*CTE.cubic_odwdeltap;
fab*=fab; fab*=fab; //fab=fab^4
prs+=fab*(devsp1.y+ prrhop2*(pospres2.w>0? 0.01f: -0.2f) );
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
const float wqq=2.f*qq+1.f;
const float wqq1=1.f-0.5f*qq;
const float wqq2=wqq1*wqq1;
wab=CTE.wendland_awen*wqq*wqq2*wqq2;
fac=CTE.wendland_bwen*qq*wqq2*wqq1/rad;
}
frx=fac*drx; fry=fac*dry; frz=fac*drz;
}
//===== Kernel Gradient Correction =====
if(kgc==true){
float frx2=matkgcp1.xx*frx + matkgcp1.xy*fry + matkgcp1.xz*frz;
float fry2=matkgcp1.xy*frx + matkgcp1.yy*fry + matkgcp1.yz*frz;
float frz2=matkgcp1.xz*frx + matkgcp1.yz*fry + matkgcp1.zz*frz;
frx=frx2; fry=fry2; frz=frz2;
}
      {//===== Acceleration =====
const float p_vpm=-prs*massp2;
acep1.x+=p_vpm*frx; acep1.y+=p_vpm*fry; acep1.z+=p_vpm*frz;
}
float dvx=velp1.x-velrhop2.x, dvy=velp1.y-velrhop2.y, dvz=velp1.z-velrhop2.z;
float robar=(rhopp1+velrhop2.w)*0.5f;
{//===== Viscosity =====
const float dot=drx*dvx + dry*dvy + drz*dvz;
const float dot_rr2=dot/(rr2+CTE.eta2);
if(tvisco==VISCO_Artificial){//-Artificial viscosity.
if(dot<0){
const float csound=velrhop2.w*OVERRHOPZERO; //const float csound=CTE.cs0*powf(rrhop*OVERRHOPZERO,3);
const float cbar=(devsp1.z+ CTE.cs0*(csound*csound*csound) )*0.5f;
const float amubar=CTE.h*dot_rr2; //float amubar=CTE.h*dot/(rr2+CTE.eta2);
const float pi_visc=(-CTE.visco*cbar*amubar/robar)*massp2;
acep1.x-=pi_visc*frx; acep1.y-=pi_visc*fry; acep1.z-=pi_visc*frz;
}
}
if(tvisco==VISCO_LaminarSPS){//-LaminarSPS viscosity.
const float temp=2.0f*CTE.visco/((rr2+CTE.eta2)*robar);
const float vtemp=massp2*temp*(drx*frx+dry*fry+drz*frz);
acep1.x+=vtemp*dvx; acep1.y+=vtemp*dvy; acep1.z+=vtemp*dvz;
// SPS turbulence model.
tsymatrix3f tausum=taup1;
if(p2>CTE.nbound){
tausum=tau[p2-CTE.nbound];
tausum.xx+=taup1.xx;
tausum.xy+=taup1.xy;
tausum.xz+=taup1.xz;
tausum.yy+=taup1.yy;
tausum.yz+=taup1.yz;
tausum.zz+=taup1.zz;
}
acep1.x+=massp2*(tausum.xx*frx+tausum.xy*fry+tausum.xz*frz);
acep1.y+=massp2*(tausum.xy*frx+tausum.yy*fry+tausum.yz*frz);
acep1.z+=massp2*(tausum.xz*frx+tausum.yz*fry+tausum.zz*frz);
// CSPH terms.
const float volp2=-massp2/velrhop2.w;
float dv=dvx*volp2; csphp1.xx+=dv*frx; csphp1.xy+=dv*fry; csphp1.xz+=dv*frz;
dv=dvy*volp2; csphp1.xy+=dv*frx; csphp1.yy+=dv*fry; csphp1.yz+=dv*frz;
dv=dvz*volp2; csphp1.xz+=dv*frx; csphp1.yz+=dv*fry; csphp1.zz+=dv*frz;
}
visc=max(dot_rr2,visc); // reduction of viscdt will be performed on GPU.
}
//-Density derivative.
arp1+=massp2*(dvx*frx+dvy*fry+dvz*frz);
//-XSPH correction.
if(xsph){
const float wab_rhobar=massp2*(wab/robar);
vcor.x-=wab_rhobar * dvx;
vcor.y-=wab_rhobar * dvy;
vcor.z-=wab_rhobar * dvz;
}
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles using NeigsCell (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,TpVisco tvisco,bool kgc,bool xsph,unsigned ncellnv> __global__ void KerComputeForcesFullFluidNeigs
(unsigned pini,unsigned n,unsigned ncells,unsigned *cell,uint2 *cellnvb,uint2 *cellnvf,float4 *pospres,float4 *velrhop,unsigned *idp,float3 *ace,float *ar,float3 *velxcor,float *viscdt,const tsymatrix3f *matkgc,const tsymatrix3f *tau,tsymatrix3f *csph)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float arp1=0,visc=0;
float3 acep1=make_float3(0,0,0);
float3 vcor;
if(xsph)vcor=acep1;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
float rhopp1=r.w;
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const float csoun=rhopp1*OVERRHOPZERO;
float3 devsp1; devsp1.x=r.w/(rhopp1*rhopp1); devsp1.y=devsp1.x*(r.w>0? 0.01f: -0.2f); devsp1.z=CTE.cs0*(csoun*csoun*csoun);
tsymatrix3f matkgcp1=matkgc[p];
tsymatrix3f taup1=tau[p];
tsymatrix3f csphp1={0,0,0,0,0,0};
const int cel=cell[p];
//-Interaction with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesFullFluidBox<tkernel,tvisco,kgc,xsph> (p1,rg.x,rg.y,pospres,velrhop,idp,tau,CTE.massf,posp1,velp1,devsp1,rhopp1,matkgcp1,taup1,csphp1,acep1,vcor,arp1,visc);
}
//-Interaction with boundaries.
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvb[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesFullFluidBox<tkernel,tvisco,kgc,xsph> (p1,rg.x,rg.y,pospres,velrhop,idp,tau,CTE.massb,posp1,velp1,devsp1,rhopp1,matkgcp1,taup1,csphp1,acep1,vcor,arp1,visc);
}
//-Stores results.
if(arp1||acep1.x||acep1.y||acep1.z||visc){
ar[p1]+=arp1;
float3 r=ace[p1]; r.x+=acep1.x; r.y+=acep1.y; r.z+=acep1.z; ace[p1]=r;
if(xsph){ r=velxcor[p1]; r.x+=vcor.x; r.y+=vcor.y; r.z+=vcor.z; velxcor[p1]=r; }
if(visc>viscdt[p1])viscdt[p1]=visc;
if(tvisco==VISCO_LaminarSPS)csph[p]=csphp1;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,TpVisco tvisco,bool kgc,bool xsph,unsigned hdiv> __global__ void KerComputeForcesFullFluid
(unsigned pini,unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,unsigned *cell,int2 *cellbegb,int2 *cellbegf,float4 *pospres,float4 *velrhop,unsigned *idp,float3 *ace,float *ar,float3 *velxcor,float *viscdt,const tsymatrix3f *matkgc,const tsymatrix3f *tau,tsymatrix3f *csph)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float arp1=0,visc=0;
float3 acep1=make_float3(0,0,0);
float3 vcor;
if(xsph)vcor=acep1;
float4 r=velrhop[p1];
float3 velp1=make_float3(r.x,r.y,r.z);
float rhopp1=r.w;
r=pospres[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const float csoun=rhopp1*OVERRHOPZERO;
float3 devsp1; devsp1.x=r.w/(rhopp1*rhopp1); devsp1.y=devsp1.x*(r.w>0? 0.01f: -0.2f); devsp1.z=CTE.cs0*(csoun*csoun*csoun);
tsymatrix3f matkgcp1=matkgc[p];
tsymatrix3f taup1=tau[p];
tsymatrix3f csphp1={0,0,0,0,0,0};
const int cel=cell[p];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesFullFluidBox<tkernel,tvisco,kgc,xsph> (p1,pini,pfin,pospres,velrhop,idp,tau,CTE.massf,posp1,velp1,devsp1,rhopp1,matkgcp1,taup1,csphp1,acep1,vcor,arp1,visc);
}
}
//-Interaction with boundaries.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegb[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesFullFluidBox<tkernel,tvisco,kgc,xsph> (p1,pini,pfin,pospres,velrhop,idp,tau,CTE.massb,posp1,velp1,devsp1,rhopp1,matkgcp1,taup1,csphp1,acep1,vcor,arp1,visc);
}
}
//-Stores results.
if(arp1||acep1.x||acep1.y||acep1.z||visc){
ar[p1]+=arp1;
float3 r=ace[p1]; r.x+=acep1.x; r.y+=acep1.y; r.z+=acep1.z; ace[p1]=r;
if(xsph){ r=velxcor[p1]; r.x+=vcor.x; r.y+=vcor.y; r.z+=vcor.z; velxcor[p1]=r; }
if(visc>viscdt[p1])viscdt[p1]=visc;
if(tvisco==VISCO_LaminarSPS)csph[p]=csphp1;
}
}
}
//##############################################################################
//# Kernel: CELLMODE_Hneigs for Shepard density filter.
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles.
//------------------------------------------------------------------------------
template<TpKernel tkernel> __device__ void KerComputeForcesShepardBox(unsigned p1,const unsigned &pini,const unsigned &pfin,float massfluid,float4 *posrhop,float4 posrhop1,float &fdwabp1,float &fdrhopp1)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 posrhop2=posrhop[p2];
float drx=posrhop1.x-posrhop2.x;
float dry=posrhop1.y-posrhop2.y;
float drz=posrhop1.z-posrhop2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
float wab;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const bool radgt=qq>1;
const float wqq1=(radgt? 2.0f-qq: qq);
const float wqq2=wqq1*wqq1;
const float wqq3=wqq2*wqq1;
wab=(radgt? CTE.cubic_a24*wqq3: CTE.cubic_a2*(1.0f-1.5f*wqq2+0.75f*wqq3));
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
float wqq2=1.f-0.5f*qq; wqq2*=wqq2;
wab=CTE.wendland_awen*(2*qq+1)*wqq2*wqq2;
}
}
fdwabp1+=wab*(massfluid/posrhop2.w);
fdrhopp1+=wab;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes the Shepard interaction between fluid particles.
//------------------------------------------------------------------------------
template<TpKernel tkernel,unsigned ncellnv> __global__ void KerComputeForcesShepardNeigs(unsigned pini,unsigned n,unsigned ncells,unsigned *cell,uint2 *cellnvf,float massfluid,float4 *posrhop,float *fdrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float4 posrhop1=posrhop[p1];
float fdrhopp1=CTE.cteshepard;
float fdwabp1=fdrhopp1*(massfluid/posrhop1.w);
const int cel=cell[p];
//-Interaction with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeForcesShepardBox <tkernel> (p1,rg.x,rg.y,massfluid,posrhop,posrhop1,fdwabp1,fdrhopp1);
}
//-Stores results.
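//-Shepard-filtered density: rhop_new = sum(m_j*Wab) / sum((m_j/rho_j)*Wab), including the self contribution added at initialisation.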
fdrhop[p]=(fdrhopp1*massfluid)/fdwabp1;
}
}
//##############################################################################
//# Kernel: CELLMODE_2H & CELLMODE_H for Shepard density filter
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes the Shepard interaction between fluid particles.
//------------------------------------------------------------------------------
template<TpKernel tkernel,unsigned hdiv> __global__ void KerComputeForcesShepard(unsigned pini,unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,unsigned *cell,int2 *cellbegf,float massfluid,float4 *posrhop,float *fdrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
float4 posrhop1=posrhop[p1];
float fdrhopp1=CTE.cteshepard;
float fdwabp1=fdrhopp1*(massfluid/posrhop1.w);
const int cel=cell[p];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeForcesShepardBox <tkernel> (p1,pini,pfin,massfluid,posrhop,posrhop1,fdwabp1,fdrhopp1);
}
}
//-Stores results.
fdrhop[p]=(fdrhopp1*massfluid)/fdwabp1;
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that makes ace[].y equal to zero.
//------------------------------------------------------------------------------
__global__ void KerResetAcey(unsigned n,unsigned npb,float3 *ace)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n)ace[p+npb].y=0;
}
//##############################################################################
//# Kernel: CELLMODE_Hneigs for MatrixKGC when kernel gradient correction is applied.
//##############################################################################
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction of a particle with a set of particles
//------------------------------------------------------------------------------
template<TpKernel tkernel> __device__ void KerComputeMatrixKgcBox(unsigned p1,const unsigned &pini,const unsigned &pfin,float massp2,const float4 *posrhop,const float3 &posp1,tsymatrix3f &matkgcp1)
{
for(int p2=pini;p2<pfin;p2++)if(p1!=p2){
float4 posrhopp2=posrhop[p2];
float drx=posp1.x-posrhopp2.x;
float dry=posp1.y-posrhopp2.y;
float drz=posp1.z-posrhopp2.z;
float rr2=drx*drx+dry*dry+drz*drz;
if(rr2<=CTE.fourh2&&rr2>=1e-18f){
float frx,fry,frz;
{//===== Kernel =====
const float rad=sqrt(rr2);
const float qq=rad/CTE.h;
float fac;
if(tkernel==KERNEL_Cubic){//-Cubic kernel.
const float wqq1=2.0f-qq;
fac=(qq>1? CTE.cubic_c2*(wqq1*wqq1): (CTE.cubic_c1+CTE.cubic_d1*qq)*qq) /rad;
}
if(tkernel==KERNEL_Wendland){//-Wendland kernel.
const float wqq1=1.f-0.5f*qq;
fac=CTE.wendland_bwen*qq*wqq1*wqq1*wqq1/rad;
}
frx=fac*drx; fry=fac*dry; frz=fac*drz;
}
//===== KGC Matrix =====
const float volp2=-massp2/posrhopp2.w;
float fr=frx*volp2; matkgcp1.xx+=fr*drx; matkgcp1.xy+=fr*dry; matkgcp1.xz+=fr*drz;
fr=fry*volp2; matkgcp1.xy+=fr*drx; matkgcp1.yy+=fr*dry; matkgcp1.yz+=fr*drz;
fr=frz*volp2; matkgcp1.xz+=fr*drx; matkgcp1.yz+=fr*dry; matkgcp1.zz+=fr*drz;
}
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles using NeigsCell.
//------------------------------------------------------------------------------
template<TpKernel tkernel,unsigned ncellnv> __global__ void KerComputeMatrixKgcNeigs
(unsigned pini,unsigned n,unsigned ncells,const unsigned *cell,const uint2 *cellnvb,const uint2 *cellnvf,const float4 *posrhop,float massb,float massf,tsymatrix3f *matkgc)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
tsymatrix3f matkgcp1={0,0,0,0,0,0};
float4 r=posrhop[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const int cel=cell[p];
//-Interaction with fluid particles.
uint2 rg;
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvf[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeMatrixKgcBox<tkernel> (p1,rg.x,rg.y,massf,posrhop,posp1,matkgcp1);
}
//-Interaction with boundaries.
for(int r=0;r<ncellnv;r++){
rg=(!r||rg.y? cellnvb[r*ncells+cel]: make_uint2(0,0));
if(rg.y)KerComputeMatrixKgcBox<tkernel> (p1,rg.x,rg.y,massb,posrhop,posp1,matkgcp1);
}
//-Stores results.
matkgc[p]=matkgcp1;
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes interaction between particles (Fluid-Fluid & Fluid-Bound).
//------------------------------------------------------------------------------
template<TpKernel tkernel,unsigned hdiv> __global__ void KerComputeMatrixKgc
(unsigned pini,unsigned n,unsigned ncx,unsigned ncy,unsigned ncz,const unsigned *cell,const int2 *cellbegb,const int2 *cellbegf,const float4 *posrhop,float massb,float massf,tsymatrix3f *matkgc)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
unsigned p1=p+pini;
tsymatrix3f matkgcp1={0,0,0,0,0,0};
float4 r=posrhop[p1];
float3 posp1=make_float3(r.x,r.y,r.z);
const int cel=cell[p];
//-Obtains limits of interaction.
int cx=cel%ncx;
int cz=int(cel/(ncx*ncy));
int cy=int((cel%(ncx*ncy))/ncx);
int cxini=cx-min(cx,hdiv);
int cxfin=cx+min(ncx-cx-1,hdiv)+1;
int yini=cy-min(cy,hdiv);
int yfin=cy+min(ncy-cy-1,hdiv)+1;
int zini=cz-min(cz,hdiv);
int zfin=cz+min(ncz-cz-1,hdiv)+1;
//-Interaction with fluid particles.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegf[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeMatrixKgcBox<tkernel> (p1,pini,pfin,massf,posrhop,posp1,matkgcp1);
}
}
//-Interaction with boundaries.
for(int z=zini;z<zfin;z++){
int zmod=(ncx*ncy)*z;
for(int y=yini;y<yfin;y++){
int ymod=zmod+ncx*y;
unsigned pini,pfin=0;
for(int x=cxini;x<cxfin;x++){
int2 cbeg=cellbegb[x+ymod];
if(cbeg.y){
if(!pfin)pini=cbeg.x;
pfin=cbeg.y;
}
}
if(pfin)KerComputeMatrixKgcBox<tkernel> (p1,pini,pfin,massb,posrhop,posp1,matkgcp1);
}
}
//-Stores results.
matkgc[p]=matkgcp1;
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that prepares variables for interaction MatrixKgc.
//------------------------------------------------------------------------------
__global__ void KerPreInteraction_MatrixKgc(unsigned np,const float3 *pos,const float *rhop,float4 *posrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<np){
float3 rpos=pos[p];
posrhop[p]=make_float4(rpos.x,rpos.y,rpos.z,rhop[p]);
}
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that inverts MatrixKgc.
//------------------------------------------------------------------------------
template<bool simula2d> __global__ void KerInvertMatrixKgc(unsigned npf,tsymatrix3f *matkgc)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<npf){
tsymatrix3f mat=matkgc[p];
//-Matrix in case of 2D simulation.
if(simula2d)mat.yy=1.0f;
//-Inversion of a symmetric matrix.
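//-Each off-diagonal entry received two contributions in KerComputeMatrixKgcBox (e.g. xy from both frx*dry and fry*drx); halving averages them to symmetrise the matrix.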
mat.xy*=0.5f;
mat.xz*=0.5f;
mat.yz*=0.5f;
float det=mat.xx*mat.yy*mat.zz + 2.f*mat.xy*mat.yz*mat.xz - mat.xz*mat.yy*mat.xz - mat.xx*mat.yz*mat.yz - mat.xy*mat.xy*mat.zz; //-Determinant of the matrix.
//-Matrix inversion.
tsymatrix3f invmat={1,0,0 ,1,0 ,1}; //-Matrix when no correction will be applied.
if(fabs(det)>0.01 && fabs(mat.xx)>MATRIXKGC_CONTROL && fabs(mat.yy)>MATRIXKGC_CONTROL && fabs(mat.zz)>MATRIXKGC_CONTROL){
invmat.xx=(mat.yy*mat.zz-mat.yz*mat.yz)/det;
invmat.xy=(mat.xz*mat.yz-mat.xy*mat.zz)/det;
invmat.xz=(mat.xy*mat.yz-mat.yy*mat.xz)/det;
invmat.yy=(mat.xx*mat.zz-mat.xz*mat.xz)/det;
invmat.yz=(mat.xy*mat.xz-mat.xx*mat.yz)/det;
invmat.zz=(mat.xx*mat.yy-mat.xy*mat.xy)/det;
}
matkgc[p]=invmat;
}
}
//==============================================================================
/// Computes matrix of kernel gradient correction (KGC).
//==============================================================================
template<TpKernel tkernel,unsigned ncellnv> void CsInteraction_MatrixKgc(StDeviceContext *dc,StDeviceCte *cte){
TmgStart(dc->timers,TMG_CfMatrixKgc);
cudaMemset(dc->matkgc,0,sizeof(tsymatrix3f)*dc->npf); //matkgc[]=0
dim3 sgrid=CsGetGridSize(dc->npok,BLOCKSIZE);
KerPreInteraction_MatrixKgc <<<sgrid,BLOCKSIZE>>>(dc->npok,dc->pos,dc->rhop,dc->pospres);
switch(dc->cellmode){
case CELLMODE_Hneigs:{
//-Interaction Fluid-Fluid & Fluid-Boundary.
unsigned bsize=dc->bsmatrixkgc;
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
KerComputeMatrixKgcNeigs<tkernel,ncellnv> <<<sgridf,bsize>>> (dc->npb,dc->npf,dc->nctotmax-1,dc->cellf,dc->cellnvb,dc->cellnvf,dc->pospres,cte->massb,cte->massf,dc->matkgc);
}break;
case CELLMODE_2H:
case CELLMODE_H:{
//-Interaction Fluid-Fluid & Fluid-Boundary.
unsigned bsize=dc->bsmatrixkgc;
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
KerComputeMatrixKgc<tkernel,(ncellnv==9? 1: 2)> <<<sgridf,bsize>>> (dc->npb,dc->npf,dc->ncx,dc->ncy,dc->ncz,dc->cellf,dc->cellbegb,dc->cellbegf,dc->pospres,cte->massb,cte->massf,dc->matkgc);
}break;
default: throw std::string("CsInteraction_MatrixKgc> CellMode unrecognised.");
}
//-Inversion of the matrix matkgc.
if(dc->simulate2d)KerInvertMatrixKgc<true> <<<sgrid,BLOCKSIZE>>>(dc->npf,dc->matkgc);
else KerInvertMatrixKgc<false> <<<sgrid,BLOCKSIZE>>>(dc->npf,dc->matkgc);
CheckErrorCuda("Failed while executing kernels of MatrixKgc interaction.");
TmgStop(dc->timers,TMG_CfMatrixKgc);
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that prepares variables for force computation.
//------------------------------------------------------------------------------
__global__ void KerPreInteraction_Forces(unsigned n,unsigned npb,float gamma,float b,float3 gravity,const float3 *pos,const float3 *vel,const float *rhop,float *csound,float3 *ace,float4 *pospres,float4 *velrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
float rrhop=rhop[p];
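//-Pressure from the equation of state p=b*((rho/rho0)^gamma-1) and particle speed of sound cs0*(rho/rho0)^3.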
float press=b*(powf(rrhop*OVERRHOPZERO,gamma)-1.0f);
float csoun=rrhop*OVERRHOPZERO;
csound[p]=CTE.cs0*(csoun*csoun*csoun);
float3 r=pos[p]; pospres[p]=make_float4(r.x,r.y,r.z,press);
r=vel[p]; velrhop[p]=make_float4(r.x,r.y,r.z,rrhop);
ace[p]=(p<npb? make_float3(0,0,0): gravity);
}
}
//==============================================================================
/// Prepares variables for force computation.
//==============================================================================
void CsPreInteraction_Forces(StDeviceContext *dc,bool xsph){
TmgStart(dc->timers,TMG_CfPreForces);
cudaMemset(dc->ar,0,sizeof(float)*dc->npok); //ar[]=0
if(xsph)cudaMemset(dc->velxcor,0,sizeof(float3)*dc->npok); //velxcor[]=0
if(dc->tvisco==VISCO_LaminarSPS)cudaMemset(dc->csph,0,sizeof(tsymatrix3f)*(dc->npf)); //csph[]={0,..,0}
cudaMemset(dc->viscdt,0,sizeof(float)*dc->npok);
dim3 sgrid=CsGetGridSize(dc->npok,BLOCKSIZE);
KerPreInteraction_Forces <<<sgrid,BLOCKSIZE>>> (dc->npok,dc->npb,dc->gamma,dc->b,dc->gravity,dc->pos,dc->vel,dc->rhop,dc->csound,dc->ace,dc->pospres,dc->velrhop);
CheckErrorCuda("Failed preparing vars for interaction forces.");
TmgStop(dc->timers,TMG_CfPreForces);
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that computes sub-particle stress tensor (Tau) for SPS turbulence model.
//------------------------------------------------------------------------------
__global__ void KerSPSCalcTau(unsigned n,unsigned npb,float smag,float blin,const float *rhop,const tsymatrix3f *csph,tsymatrix3f *tau)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<n){
tsymatrix3f rcsph=csph[p];
const float pow1=rcsph.xx*rcsph.xx + rcsph.yy*rcsph.yy + rcsph.zz*rcsph.zz;
const float prr=pow1+pow1 + rcsph.xy*rcsph.xy + rcsph.xz*rcsph.xz + rcsph.yz*rcsph.yz;
const float visc_SPS=smag*sqrt(prr);
const float div_u=rcsph.xx+rcsph.yy+rcsph.zz;
const float sps_k=(2.0f/3.0f)*visc_SPS*div_u;
const float sps_Blin=blin*prr;
const float sumsps=-(sps_k+sps_Blin);
const float twovisc_SPS=(visc_SPS+visc_SPS);
const float one_rho2 = 1.0f/rhop[p+npb];
tsymatrix3f rtau;
rtau.xx=one_rho2*(twovisc_SPS*rcsph.xx +sumsps);
rtau.xy=one_rho2*(visc_SPS*rcsph.xy);
rtau.xz=one_rho2*(visc_SPS*rcsph.xz);
rtau.yy=one_rho2*(twovisc_SPS*rcsph.yy +sumsps);
rtau.yz=one_rho2*(visc_SPS*rcsph.yz);
rtau.zz=one_rho2*(twovisc_SPS*rcsph.zz +sumsps);
tau[p]=rtau;
}
}
//==============================================================================
/// Interaction between particles.
//==============================================================================
template<bool xsph,TpKernel tkernel,TpVisco tvisco,bool kgc,bool floating,unsigned ncellnv> void CsInteraction_Forces(StDeviceContext *dc,StDeviceCte *cte){
//-Interaction to compute matrix for Kernel Gradient Correction.
if(dc->kgc)CsInteraction_MatrixKgc<tkernel,ncellnv>(dc,cte);
//-Interaction Forces
CsPreInteraction_Forces(dc,xsph);
switch(dc->cellmode){
case CELLMODE_Hneigs:{
//-Interaction Fluid-Fluid & Fluid-Boundary.
unsigned bsize=(xsph? dc->bsforcesfluid: dc->bsforcesfluidcorr);
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
TmgStart(dc->timers,TMG_CfForcesFluid);
if(kgc||tvisco==VISCO_LaminarSPS)KerComputeForcesFullFluidNeigs<tkernel,tvisco,kgc,xsph,ncellnv> <<<sgridf,bsize>>> (dc->npb,dc->npf,dc->nctotmax-1,dc->cellf,dc->cellnvb,dc->cellnvf,dc->pospres,dc->velrhop,dc->idp,dc->ace,dc->ar,dc->velxcor,dc->viscdt,dc->matkgc,dc->tau,dc->csph);
else KerComputeForcesFluidNeigs<tkernel,floating,xsph,ncellnv> <<<sgridf,bsize>>> (dc->npb,dc->npf,dc->nctotmax-1,dc->cellf,dc->cellnvb,dc->cellnvf,dc->pospres,dc->velrhop,dc->idp,dc->ace,dc->ar,dc->velxcor,dc->viscdt);
TmgStop(dc->timers,TMG_CfForcesFluid);
//-Interaction Boundary-Fluid.
bsize=dc->bsforcesbound;
dim3 sgridb=CsGetGridSize(dc->npb,bsize);
TmgStart(dc->timers,TMG_CfForcesBound);
KerComputeForcesBoundNeigs<tkernel,floating,ncellnv> <<<sgridb,bsize>>> (dc->npb,dc->nctotmax-1,dc->cellb,dc->cellnvf,dc->pospres,dc->velrhop,dc->idp,dc->ar,dc->viscdt);
TmgStop(dc->timers,TMG_CfForcesBound);
}break;
case CELLMODE_2H:
case CELLMODE_H:{
//-Interaction Fluid-Fluid & Fluid-Boundary.
unsigned bsize=(xsph? dc->bsforcesfluid: dc->bsforcesfluidcorr);
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
TmgStart(dc->timers,TMG_CfForcesFluid);
if(kgc||tvisco==VISCO_LaminarSPS)KerComputeForcesFullFluid<tkernel,tvisco,kgc,xsph,(ncellnv==9? 1: 2)> <<<sgridf,bsize>>> (dc->npb,dc->npf,dc->ncx,dc->ncy,dc->ncz,dc->cellf,dc->cellbegb,dc->cellbegf,dc->pospres,dc->velrhop,dc->idp,dc->ace,dc->ar,dc->velxcor,dc->viscdt,dc->matkgc,dc->tau,dc->csph);
else KerComputeForcesFluid<tkernel,floating,xsph,(ncellnv==9? 1: 2)> <<<sgridf,bsize>>> (dc->npb,dc->npf,dc->ncx,dc->ncy,dc->ncz,dc->cellf,dc->cellbegb,dc->cellbegf,dc->pospres,dc->velrhop,dc->idp,dc->ace,dc->ar,dc->velxcor,dc->viscdt);
TmgStop(dc->timers,TMG_CfForcesFluid);
//-Interaction Boundary-Fluid.
bsize=dc->bsforcesbound;
dim3 sgridb=CsGetGridSize(dc->npb,bsize);
TmgStart(dc->timers,TMG_CfForcesBound);
KerComputeForcesBound<tkernel,floating,(ncellnv==9? 1: 2)> <<<sgridb,bsize>>> (dc->npb,dc->ncx,dc->ncy,dc->ncz,dc->cellb,dc->cellbegf,dc->pospres,dc->velrhop,dc->idp,dc->ar,dc->viscdt);
TmgStop(dc->timers,TMG_CfForcesBound);
}break;
default: throw std::string("CsInteraction_Forces> CellMode unrecognised.");
}
//-Computes values of tau[] starting from csph[].
if(tvisco==VISCO_LaminarSPS){
dim3 sgridf=CsGetGridSize(dc->npf,BLOCKSIZE);
KerSPSCalcTau <<<sgridf,BLOCKSIZE>>> (dc->npf,dc->npb,dc->smag,dc->blin,dc->rhop,dc->csph,dc->tau);
}
//-Cancels forces in the Y direction in the 2D case.
if(dc->simulate2d){
dim3 sgrid=CsGetGridSize(dc->npf,BLOCKSIZE);
KerResetAcey <<<sgrid,BLOCKSIZE>>> (dc->npf,dc->npb,dc->ace);
}
CheckErrorCuda("Failed while executing kernels of interaction forces.");
}
//==============================================================================
/// Calls CsInteraction_Forces().
//==============================================================================
template<bool xsph,TpKernel tkernel,TpVisco tvisco,bool kgc,bool floating>void CsCallInteraction_Forces3(StDeviceContext *dc,StDeviceCte *cte){
if(dc->ncellnv==9)CsInteraction_Forces<xsph,tkernel,tvisco,kgc,floating,9>(dc,cte);
else if(dc->ncellnv==25)CsInteraction_Forces<xsph,tkernel,tvisco,kgc,floating,25>(dc,cte);
}
//==============================================================================
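/// Selects the kernel gradient correction (kgc) template argument according to dc->kgc.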
template<bool xsph,TpKernel tkernel,TpVisco tvisco,bool floating>void CsCallInteraction_Forces2(StDeviceContext *dc,StDeviceCte *cte){
if(dc->kgc)CsCallInteraction_Forces3<xsph,tkernel,tvisco,true,floating>(dc,cte);
else CsCallInteraction_Forces3<xsph,tkernel,tvisco,false,floating>(dc,cte);
}
//==============================================================================
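/// Selects the viscosity treatment (Artificial or LaminarSPS) template argument according to dc->tvisco.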
template<bool xsph,TpKernel tkernel,bool floating>void CsCallInteraction_Forces1(StDeviceContext *dc,StDeviceCte *cte){
if(dc->tvisco==VISCO_Artificial)CsCallInteraction_Forces2<xsph,tkernel,VISCO_Artificial,floating>(dc,cte);
else if(dc->tvisco==VISCO_LaminarSPS)CsCallInteraction_Forces2<xsph,tkernel,VISCO_LaminarSPS,floating>(dc,cte);
}
//==============================================================================
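/// Selects the SPH kernel (Cubic or Wendland) template argument according to dc->tkernel.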
template<bool xsph,bool floating>void CsCallInteraction_Forces(StDeviceContext *dc,StDeviceCte *cte){
if(dc->tkernel==KERNEL_Cubic)CsCallInteraction_Forces1<xsph,KERNEL_Cubic,floating>(dc,cte);
else if(dc->tkernel==KERNEL_Wendland)CsCallInteraction_Forces1<xsph,KERNEL_Wendland,floating>(dc,cte);
}
//------------------------------------------------------------------------------
/// CUDA KERNEL that prepares variables for Shepard interaction.
//------------------------------------------------------------------------------
template<bool floating> __global__ void KerPreShepard(unsigned pini,unsigned npf,float3 *pos,float *rhop,unsigned *idp,unsigned nbound,float ftposx,float4 *posrhop)
{
unsigned p=blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
if(p<npf){
p+=pini;
float3 rpos=(floating&&idp[p]<nbound? make_float3(ftposx,0,0): pos[p]);
posrhop[p]=make_float4(rpos.x,rpos.y,rpos.z,rhop[p]);
}
}
//==============================================================================
/// Applies Shepard density filter.
//==============================================================================
template<TpKernel tkernel,bool floating,unsigned ncellnv> void CsRunShepard(StDeviceContext *dc,StDeviceCte *cte){
TmgStart(dc->timers,TMG_CfShepard);
dim3 sgrid=CsGetGridSize(dc->npf,BLOCKSIZE);
KerPreShepard<floating> <<<sgrid,BLOCKSIZE>>>(dc->npb,dc->npf,dc->pos,dc->rhop,dc->idp,dc->nbound,dc->posmin.x-(cte->h*2),dc->pospres);
//-Shepard interaction.
const unsigned bsize=dc->bsshepard;
dim3 sgridf=CsGetGridSize(dc->npf,bsize);
switch(dc->cellmode){
case CELLMODE_Hneigs:
KerComputeForcesShepardNeigs<tkernel,ncellnv> <<<sgridf,bsize>>> (dc->npb,dc->npf,dc->nctotmax-1,dc->cellf,dc->cellnvf,cte->massf,dc->pospres,dc->fdrhop);
break;
case CELLMODE_2H:
case CELLMODE_H:
KerComputeForcesShepard<tkernel,(ncellnv==9? 1: 2)> <<<sgridf,bsize>>> (dc->npb,dc->npf,dc->ncx,dc->ncy,dc->ncz,dc->cellf,dc->cellbegf,cte->massf,dc->pospres,dc->fdrhop);
break;
default: throw std::string("CsRunShepard> CellMode unrecognised.");
}
CheckErrorCuda("Failed while executing kernel Shepard.");
//-Applies new density values.
cudaMemcpy(dc->rhop+dc->npb,dc->fdrhop,sizeof(float)*dc->npf,cudaMemcpyDeviceToDevice);
TmgStop(dc->timers,TMG_CfShepard);
}
//==============================================================================
/// Calls CsRunShepard().
//==============================================================================
template<TpKernel tkernel,bool floating> void CsCallRunShepard3(StDeviceContext *dc,StDeviceCte *cte){
if(dc->ncellnv==9)CsRunShepard<tkernel,floating,9>(dc,cte);
else if(dc->ncellnv==25)CsRunShepard<tkernel,floating,25>(dc,cte);
}
//==============================================================================
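/// Selects the floating-bodies template argument according to dc->nfloat.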
template<TpKernel tkernel> void CsCallRunShepard2(StDeviceContext *dc,StDeviceCte *cte){
if(dc->nfloat)CsCallRunShepard3<tkernel,true>(dc,cte);
else CsCallRunShepard3<tkernel,false>(dc,cte);
}
//==============================================================================
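/// Selects the SPH kernel (Cubic or Wendland) template argument according to dc->tkernel.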
void CsCallRunShepard(StDeviceContext *dc,StDeviceCte *cte){
if(dc->tkernel==KERNEL_Cubic)CsCallRunShepard2<KERNEL_Cubic>(dc,cte);
else if(dc->tkernel==KERNEL_Wendland)CsCallRunShepard2<KERNEL_Wendland>(dc,cte);
}
|
73ea180e24951e0be63e58e90b96a346ea1f463a.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/cgemv_mgpu.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "gemv_mgpu_core.cuh"
#include "gemv_mgpu_offset_core.cuh"
#include "kblas_defs.h"
#if(TARGET_SM >= 30)
#define cgemvn_mgpu_bs (32)
#define cgemvn_mgpu_ty (4)
//#define cgemvn_mgpu_by (4)
#define cgemvt_mgpu_bs (32)
#define cgemvt_mgpu_ty (2)
//#define cgemvt_mgpu_by (4)
#else
#define cgemvn_mgpu_bs (32)
#define cgemvn_mgpu_ty (8)
//#define cgemvn_mgpu_by (1)
#define cgemvt_mgpu_bs (32)
#define cgemvt_mgpu_ty (8)
//#define cgemvt_mgpu_by (1)
#endif
extern "C"
int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, hipStream_t stream);
extern "C"
int kblas_cgemv_mgpu_driver( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy, int gpu_gid, int ngpus,
hipStream_t stream = 0)
{
const cuFloatComplex c_zero = make_cuFloatComplex(0, 0);
if(trans == 'n' || trans == 'N')
{
//******** config parameters
const int thread_x = cgemvn_mgpu_bs;
const int thread_y = cgemvn_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
int grid_y_n = 1 * ngpus;
//**************************
// scaling with beta
//if(gpu_gid == 0)hipblasDscal(rows, beta, dY, incy);
if(gpu_gid == 0)kblas_cscal_async(rows, beta, dY, incy, stream);
else kblas_cscal_async(rows, c_zero, dY, incy, stream);
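// Column-block partitioning: blocks of cgemvn_mgpu_bs columns are distributed cyclically over the GPUs;
// cols_ is the number of columns that end up assigned to this GPU (gpu_gid), with the trailing partial
// block going to the GPU right after the last full one.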
int cols_ = cgemvn_mgpu_bs * ( (cols/cgemvn_mgpu_bs)/ngpus );
if(gpu_gid < (cols/cgemvn_mgpu_bs)%ngpus) cols_ += cgemvn_mgpu_bs;
if(gpu_gid == (cols/cgemvn_mgpu_bs)%ngpus) cols_ += cols%cgemvn_mgpu_bs;
int mod_r = rows % cgemvn_mgpu_bs;
int mod_c = cols_ % cgemvn_mgpu_bs;
if(mod_r == 0)
{
if(mod_c == 0)
{
// special case
int blocks = rows/cgemvn_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvn_mgpu_special<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus);
}
else
{
// generic case for columns only
const int irregular_cols = mod_c % elements_per_thread;
int blocks = rows/cgemvn_mgpu_bs;
blocks += 1; // dummy thread block
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
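/**
 * Illustrative example (values taken from the #defines above, TARGET_SM >= 30):
 * cgemvn_mgpu_bs = 32 and cgemvn_mgpu_ty = 4 give elements_per_thread = 32/(2*4) = 4,
 * so irregular_cols = mod_c % 4 can only be 0, 1, 2 or 3 and only those cases are reachable.
 **/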
case 0:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 1:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 2:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 3:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 4:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 5:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 6:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 7:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 8:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 9:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 10:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 11:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 12:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 13:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 14:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 15:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else // mod_r != 0
{
if(mod_c == 0)
{
// generic case for rows only
int blocks = (rows/cgemvn_mgpu_bs) + (mod_r != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus);
}
else
{
// generic case for rows and cols
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/cgemvn_mgpu_bs) + (mod_r != 0);
//printf("gpu_gid = %d, cols_ = %d \n", gpu_gid, cols_);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 1:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 2:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 3:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 4:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 5:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 6:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 7:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 8:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 9:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 10:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 11:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 12:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 13:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 14:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 15:hipLaunchKernelGGL(( gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//************ config parameters
const int thread_x = cgemvt_mgpu_bs;
const int thread_y = cgemvt_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
int grid_y_t = 1 * ngpus;
//******************************
// scaling with beta
//if(gpu_gid == 0)hipblasDscal(cols, beta, dY, incy);
if(gpu_gid == 0)kblas_cscal_async(cols, beta, dY, incy, stream);
else kblas_cscal_async(cols, c_zero, dY, incy, stream);
int cols_ = cgemvt_mgpu_bs * ( (cols/cgemvt_mgpu_bs)/ngpus );
if(gpu_gid < (cols/cgemvt_mgpu_bs)%ngpus) cols_ += cgemvt_mgpu_bs;
if(gpu_gid == (cols/cgemvt_mgpu_bs)%ngpus) cols_ += cols%cgemvt_mgpu_bs;
int mod_r = rows % cgemvt_mgpu_bs;
int mod_c = cols_ % cgemvt_mgpu_bs;
if(mod_c == 0)
{
if(mod_r == 0)
{
// special case
int blocks = cols_/cgemvt_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvt_mgpu_special<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, conj);
}
else
{
// mod_r != 0
int blocks = cols_/cgemvt_mgpu_bs;
blocks += 1; // dummy thread block
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj);
}
}
else // mod_c != 0
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/cgemvt_mgpu_bs + (mod_c != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
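/**
 * Illustrative example (values taken from the #defines above, TARGET_SM >= 30):
 * cgemvt_mgpu_bs = 32 and cgemvt_mgpu_ty = 2 give elements_per_thread = 32/(2*2) = 8,
 * so irregular_cols = mod_c % 8 lies in 0-7 and only the first eight cases are reachable.
 **/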
case 0:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 1:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 2:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 3:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 4:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 5:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 6:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 7:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 8:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 9:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 10:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 11:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 12:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 13:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 14:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 15:hipLaunchKernelGGL(( gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
default: printf("CGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("CGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_cgemv_mgpu_driver_offset( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy, int gpu_gid, int ngpus,
int offset_r, int offset_c,
hipStream_t stream = 0)
{
const cuFloatComplex c_zero = make_cuFloatComplex(0, 0);
if(trans == 'n' || trans == 'N')
{
//**** Config parameters
const int thread_x = cgemvn_mgpu_bs;
const int thread_y = cgemvn_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_n = 2 * ngpus;
//*************************
/** necessary offset calculations **/
int offset_r_ = offset_r % cgemvn_mgpu_bs;
int offset_c_ = offset_c % cgemvn_mgpu_bs;
int total_blocks_skipped_r = offset_r / cgemvn_mgpu_bs;
int total_blocks_skipped_c = offset_c / cgemvn_mgpu_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks_c * cgemvn_mgpu_bs * lda;
dA += my_skipped_blocks_r * cgemvn_mgpu_bs;
dX += total_blocks_skipped_c * cgemvn_mgpu_bs * incx;
dY += total_blocks_skipped_r * cgemvn_mgpu_bs * incy;
rows -= total_blocks_skipped_r * cgemvn_mgpu_bs;
cols -= total_blocks_skipped_c * cgemvn_mgpu_bs;
/** end of offset calculations **/
int nstripes = (cols/cgemvn_mgpu_bs) + ((cols%cgemvn_mgpu_bs) != 0);
// scaling with beta
if(gpu_gid == 0)kblas_cscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream);
else kblas_cscal_async(rows-offset_r_, c_zero, dY+(offset_r_*incy), incy, stream);
int cols_ = cgemvn_mgpu_bs * ( (cols/cgemvn_mgpu_bs)/ngpus );
if(new_gpu_gid < (cols/cgemvn_mgpu_bs)%ngpus) cols_ += cgemvn_mgpu_bs;
if(new_gpu_gid == (cols/cgemvn_mgpu_bs)%ngpus) cols_ += cols%cgemvn_mgpu_bs;
int mod_r = rows % cgemvn_mgpu_bs;
int mod_c = cols_ % cgemvn_mgpu_bs;
if(mod_r == 0 && mod_c == 0)
{
// special case
int blocks = rows/cgemvn_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvn_mgpu_special_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread>)
, dim3(dimGrid), dim3(dimBlock), 0, stream,
rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_);
}
else
{
// generic case (mod_r != 0 and/or mod_c != 0)
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/cgemvn_mgpu_bs) + (mod_r != 0);
if(mod_r == 0)blocks += 1; // dummy thread block, will return immediately if mod_r == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 1:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 2:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 3:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 4:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 5:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 6:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 7:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 8:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 9:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 10:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 11:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 12:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 13:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 14:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 15:hipLaunchKernelGGL(( gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//**** Config parameters
const int thread_x = cgemvt_mgpu_bs;
const int thread_y = cgemvt_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_t = 2 * ngpus;
//*************************
/** necessary offset calculations **/
int offset_r_ = offset_r % cgemvt_mgpu_bs;
int offset_c_ = offset_c % cgemvt_mgpu_bs;
int total_blocks_skipped_r = offset_r / cgemvt_mgpu_bs;
int total_blocks_skipped_c = offset_c / cgemvt_mgpu_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
//if(new_gpu_gid != 3){return 0;}
// Advance pointers accordingly
dA += my_skipped_blocks_c * cgemvt_mgpu_bs * lda;
dA += my_skipped_blocks_r * cgemvt_mgpu_bs;
dX += total_blocks_skipped_r * cgemvt_mgpu_bs * incx;
dY += total_blocks_skipped_c * cgemvt_mgpu_bs * incy;
rows -= total_blocks_skipped_r * cgemvt_mgpu_bs;
cols -= total_blocks_skipped_c * cgemvt_mgpu_bs;
/** end of offset calculations **/
int nstripes = (cols/cgemvt_mgpu_bs) + ((cols%cgemvt_mgpu_bs) != 0);
// scaling with beta
//if(gpu_gid == 0)hipblasDscal(cols-offset_, beta, dY+(offset_*incy), incy);
if(gpu_gid == 0)kblas_cscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream);
else kblas_cscal_async(cols-offset_c_, c_zero, dY+(offset_c_*incy), incy, stream);
int cols_ = cgemvt_mgpu_bs * ( (cols/cgemvt_mgpu_bs)/ngpus );
if(new_gpu_gid < (cols/cgemvt_mgpu_bs)%ngpus) cols_ += cgemvt_mgpu_bs;
if(new_gpu_gid == (cols/cgemvt_mgpu_bs)%ngpus) cols_ += cols%cgemvt_mgpu_bs;
int mod_r = rows % cgemvt_mgpu_bs;
int mod_c = cols_ % cgemvt_mgpu_bs;
if(mod_r == 0 && mod_c == 0)
{
int blocks = cols_/cgemvt_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvt_mgpu_special_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj);
}
else
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/cgemvt_mgpu_bs + (mod_c != 0);
int gpu_last = (nstripes+ngpus-1)%ngpus;
if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 1:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 2:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 3:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 4:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 5:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 6:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 7:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 8:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 9:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 10:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 11:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 12:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 13:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 14:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 15:hipLaunchKernelGGL(( gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
default: printf("CGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("CGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/***********************************************************************************/
extern "C"
int kblas_cgemv_mgpu( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex **dA, int lda,
cuFloatComplex **dX, int incx,
cuFloatComplex beta, cuFloatComplex **dY, int incy,
int ngpus,
int offset_r, int offset_c)
{
const int ngpus_local = ngpus;
if(offset_r == 0 && offset_c == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_cgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_cgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c);
}
}
// wait for gpus to finish
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
hipDeviceSynchronize();
}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_cgemv_mgpu_async( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex **dA, int lda,
cuFloatComplex **dX, int incx,
cuFloatComplex beta, cuFloatComplex **dY, int incy,
int ngpus,
int offset_r, int offset_c,
hipStream_t stream[MAX_NGPUS][MAX_STREAMS])
{
const int ngpus_local = ngpus;
if(offset_r == 0 && offset_c == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_cgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, stream[i][0]);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
hipSetDevice(gpu_lid[i]);
kblas_cgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c);
}
}
return 0;
}
/*************************************************************************************/
extern "C"
int get_cgemv_mgpu_bs(char trans)
{
if(trans == 'n' || trans == 'N')
return cgemvn_mgpu_bs;
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
return cgemvt_mgpu_bs;
else
{printf("Error .. input %c is not supported for gemv \n", trans); return -1;}
}
|
73ea180e24951e0be63e58e90b96a346ea1f463a.cu
|
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/cgemv_mgpu.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include "gemv_mgpu_core.cuh"
#include "gemv_mgpu_offset_core.cuh"
#include "kblas_defs.h"
#if(TARGET_SM >= 30)
#define cgemvn_mgpu_bs (32)
#define cgemvn_mgpu_ty (4)
//#define cgemvn_mgpu_by (4)
#define cgemvt_mgpu_bs (32)
#define cgemvt_mgpu_ty (2)
//#define cgemvt_mgpu_by (4)
#else
#define cgemvn_mgpu_bs (32)
#define cgemvn_mgpu_ty (8)
//#define cgemvn_mgpu_by (1)
#define cgemvt_mgpu_bs (32)
#define cgemvt_mgpu_ty (8)
//#define cgemvt_mgpu_by (1)
#endif
extern "C"
int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, cudaStream_t stream);
extern "C"
int kblas_cgemv_mgpu_driver( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy, int gpu_gid, int ngpus,
cudaStream_t stream = 0)
{
const cuFloatComplex c_zero = make_cuFloatComplex(0, 0);
if(trans == 'n' || trans == 'N')
{
//******** config parameters
const int thread_x = cgemvn_mgpu_bs;
const int thread_y = cgemvn_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
int grid_y_n = 1 * ngpus;
//**************************
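// For reference: with the TARGET_SM >= 30 configuration above (cgemvn_mgpu_bs = 32, cgemvn_mgpu_ty = 4),
// elements_per_thread = 32/(2*4) = 4, so irregular_cols in the generic kernels below can only take the
// values 0..3; the longer switch-case lists keep the code valid for other block configurations.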
// scaling with beta
//if(gpu_gid == 0)cublasDscal(rows, beta, dY, incy);
if(gpu_gid == 0)kblas_cscal_async(rows, beta, dY, incy, stream);
else kblas_cscal_async(rows, c_zero, dY, incy, stream);
int cols_ = cgemvn_mgpu_bs * ( (cols/cgemvn_mgpu_bs)/ngpus );
if(gpu_gid < (cols/cgemvn_mgpu_bs)%ngpus) cols_ += cgemvn_mgpu_bs;
if(gpu_gid == (cols/cgemvn_mgpu_bs)%ngpus) cols_ += cols%cgemvn_mgpu_bs;
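// Hypothetical example of this column split: cols = 100, cgemvn_mgpu_bs = 32, ngpus = 2
// => 3 full block columns + 4 leftover columns; gpu 0 gets 32 + 32 = 64 columns (one extra
// full block), gpu 1 gets 32 + 4 = 36 columns (the remainder), and 64 + 36 = 100.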
int mod_r = rows % cgemvn_mgpu_bs;
int mod_c = cols_ % cgemvn_mgpu_bs;
if(mod_r == 0)
{
if(mod_c == 0)
{
// special case
int blocks = rows/cgemvn_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
gemvn_mgpu_special<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus);
}
else
{
// generic case for columns only
const int irregular_cols = mod_c % elements_per_thread;
int blocks = rows/cgemvn_mgpu_bs;
blocks += 1; // dummy thread block
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 1: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 2: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 3: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 4: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 5: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 6: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 7: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 8: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 9: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 10: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 11: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 12: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 13: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 14: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 15: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else // mod_r != 0
{
if(mod_c == 0)
{
// generic case for columns only
int blocks = (rows/cgemvn_mgpu_bs) + (mod_r != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus);
}
else
{
// generic case for rows and cols
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/cgemvn_mgpu_bs) + (mod_r != 0);
//printf("gpu_gid = %d, cols_ = %d \n", gpu_gid, cols_);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 1: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 2: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 3: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 4: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 5: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 6: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 7: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 8: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 9: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 10: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 11: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 12: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 13: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 14: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
case 15: gemvn_mgpu_generic<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//************ config parameters
const int thread_x = cgemvt_mgpu_bs;
const int thread_y = cgemvt_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
int grid_y_t = 1 * ngpus;
//******************************
// scaling with beta
//if(gpu_gid == 0)cublasDscal(cols, beta, dY, incy);
if(gpu_gid == 0)kblas_cscal_async(cols, beta, dY, incy, stream);
else kblas_cscal_async(cols, c_zero, dY, incy, stream);
int cols_ = cgemvt_mgpu_bs * ( (cols/cgemvt_mgpu_bs)/ngpus );
if(gpu_gid < (cols/cgemvt_mgpu_bs)%ngpus) cols_ += cgemvt_mgpu_bs;
if(gpu_gid == (cols/cgemvt_mgpu_bs)%ngpus) cols_ += cols%cgemvt_mgpu_bs;
int mod_r = rows % cgemvt_mgpu_bs;
int mod_c = cols_ % cgemvt_mgpu_bs;
if(mod_c == 0)
{
if(mod_r == 0)
{
// special case
int blocks = cols_/cgemvt_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
gemvt_mgpu_special<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, gpu_gid, ngpus, conj);
}
else
{
// mod_r != 0
int blocks = cols_/cgemvt_mgpu_bs;
blocks += 1; // dummy thread block
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj);
}
}
else // mod_c != 0
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/cgemvt_mgpu_bs + (mod_c != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 1: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 2: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 3: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 4: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 5: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 6: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 7: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 8: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 9: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 10: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 11: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 12: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 13: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 14: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
case 15: gemvt_mgpu_generic<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, gpu_gid, ngpus, conj); break;
default: printf("CGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("CGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/*************************************************************************************/
extern "C"
int kblas_cgemv_mgpu_driver_offset( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy, int gpu_gid, int ngpus,
int offset_r, int offset_c,
cudaStream_t stream = 0)
{
const cuFloatComplex c_zero = make_cuFloatComplex(0, 0);
if(trans == 'n' || trans == 'N')
{
//**** Config parameters
const int thread_x = cgemvn_mgpu_bs;
const int thread_y = cgemvn_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_n = 2 * ngpus;
//*************************
/** offset necessary calculation **/
int offset_r_ = offset_r % cgemvn_mgpu_bs;
int offset_c_ = offset_c % cgemvn_mgpu_bs;
int total_blocks_skipped_r = offset_r / cgemvn_mgpu_bs;
int total_blocks_skipped_c = offset_c / cgemvn_mgpu_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks_c * cgemvn_mgpu_bs * lda;
dA += my_skipped_blocks_r * cgemvn_mgpu_bs;
dX += total_blocks_skipped_c * cgemvn_mgpu_bs * incx;
dY += total_blocks_skipped_r * cgemvn_mgpu_bs * incy;
rows -= total_blocks_skipped_r * cgemvn_mgpu_bs;
cols -= total_blocks_skipped_c * cgemvn_mgpu_bs;
/** end offset necessary calculation **/
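// Hypothetical example with cgemvn_mgpu_bs = 32 and ngpus = 2: offset_c = 100 skips
// total_blocks_skipped_c = 3 block columns and leaves offset_c_ = 4 columns inside a block;
// gpu 0 skips 2 of those block columns, gpu 1 skips 1, ref_gpu = 1, and the re-based ids give
// new_gpu_gid = 0 for gpu 1, i.e. gpu 1 owns the first block column after the offset.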
int nstripes = (cols/cgemvn_mgpu_bs) + ((cols%cgemvn_mgpu_bs) != 0);
// scaling with beta
if(gpu_gid == 0)kblas_cscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream);
else kblas_cscal_async(rows-offset_r_, c_zero, dY+(offset_r_*incy), incy, stream);
int cols_ = cgemvn_mgpu_bs * ( (cols/cgemvn_mgpu_bs)/ngpus );
if(new_gpu_gid < (cols/cgemvn_mgpu_bs)%ngpus) cols_ += cgemvn_mgpu_bs;
if(new_gpu_gid == (cols/cgemvn_mgpu_bs)%ngpus) cols_ += cols%cgemvn_mgpu_bs;
int mod_r = rows % cgemvn_mgpu_bs;
int mod_c = cols_ % cgemvn_mgpu_bs;
if(mod_r == 0 && mod_c == 0)
{
// special case
int blocks = rows/cgemvn_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
gemvn_mgpu_special_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread>
<<<dimGrid, dimBlock, 0, stream>>>
(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_);
}
else
{
// generic case for columns only
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/cgemvn_mgpu_bs) + (mod_r != 0);
if(mod_r == 0)blocks += 1; // dummy thread block, will return immediately if mod_r == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 1: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 2: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 3: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 4: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 5: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 6: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 7: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 8: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 9: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 10: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 11: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 12: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 13: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 14: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
case 15: gemvn_mgpu_generic_offset<cuFloatComplex, cgemvn_mgpu_bs, cgemvn_mgpu_bs, cgemvn_mgpu_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//**** Config parameters
const int thread_x = cgemvt_mgpu_bs;
const int thread_y = cgemvt_mgpu_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_t = 2 * ngpus;
//*************************
/** offset necessary calculation **/
int offset_r_ = offset_r % cgemvt_mgpu_bs;
int offset_c_ = offset_c % cgemvt_mgpu_bs;
int total_blocks_skipped_r = offset_r / cgemvt_mgpu_bs;
int total_blocks_skipped_c = offset_c / cgemvt_mgpu_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
//if(new_gpu_gid != 3){return 0;}
// Advance pointers accordingly
dA += my_skipped_blocks_c * cgemvt_mgpu_bs * lda;
dA += my_skipped_blocks_r * cgemvt_mgpu_bs;
dX += total_blocks_skipped_r * cgemvt_mgpu_bs * incx;
dY += total_blocks_skipped_c * cgemvt_mgpu_bs * incy;
rows -= total_blocks_skipped_r * cgemvt_mgpu_bs;
cols -= total_blocks_skipped_c * cgemvt_mgpu_bs;
/** end offset necessary calculation **/
int nstripes = (cols/cgemvt_mgpu_bs) + ((cols%cgemvt_mgpu_bs) != 0);
// scaling with beta
//if(gpu_gid == 0)cublasDscal(cols-offset_, beta, dY+(offset_*incy), incy);
if(gpu_gid == 0)kblas_cscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream);
else kblas_cscal_async(cols-offset_c_, c_zero, dY+(offset_c_*incy), incy, stream); // column offset, matching the gpu_gid == 0 branch above (dY is indexed by columns in the transpose case)
int cols_ = cgemvt_mgpu_bs * ( (cols/cgemvt_mgpu_bs)/ngpus );
if(new_gpu_gid < (cols/cgemvt_mgpu_bs)%ngpus) cols_ += cgemvt_mgpu_bs;
if(new_gpu_gid == (cols/cgemvt_mgpu_bs)%ngpus) cols_ += cols%cgemvt_mgpu_bs;
int mod_r = rows % cgemvt_mgpu_bs;
int mod_c = cols_ % cgemvt_mgpu_bs;
if(mod_r == 0 && mod_c == 0)
{
int blocks = cols_/cgemvt_mgpu_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
gemvt_mgpu_special_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj);
}
else
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/cgemvt_mgpu_bs + (mod_c != 0);
int gpu_last = (nstripes+ngpus-1)%ngpus;
if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 1: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 2: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 3: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 4: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 5: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 6: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 7: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 8: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 9: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 10: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 11: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 12: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 13: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 14: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
case 15: gemvt_mgpu_generic_offset<cuFloatComplex, cgemvt_mgpu_bs, thread_x, thread_y, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, new_gpu_gid, ngpus, nstripes, offset_r_, offset_c_, conj); break;
default: printf("CGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("CGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/***********************************************************************************/
extern "C"
int kblas_cgemv_mgpu( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex **dA, int lda,
cuFloatComplex **dX, int incx,
cuFloatComplex beta, cuFloatComplex **dY, int incy,
int ngpus,
int offset_r, int offset_c)
{
const int ngpus_local = ngpus;
if(offset_r == 0 && offset_c == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_cgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_cgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c);
}
}
// wait for gpus to finish
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
cudaDeviceSynchronize();
}
return 0;
}
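/*
 * Minimal (hypothetical) host-side usage sketch, assuming dA, dX and dY have already been
 * allocated and distributed across the GPUs described by gpu_lid[] / gpu_gid[]:
 *
 *     cuFloatComplex alpha = make_cuFloatComplex(1.0f, 0.0f);
 *     cuFloatComplex beta  = make_cuFloatComplex(0.0f, 0.0f);
 *     // y = alpha * A * x + beta * y on ngpus devices, with no row/column offset
 *     kblas_cgemv_mgpu('N', rows, cols, alpha, dA, lda, dX, 1, beta, dY, 1, ngpus, 0, 0);
 */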
/*************************************************************************************/
extern "C"
int kblas_cgemv_mgpu_async( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex **dA, int lda,
cuFloatComplex **dX, int incx,
cuFloatComplex beta, cuFloatComplex **dY, int incy,
int ngpus,
int offset_r, int offset_c,
cudaStream_t stream[MAX_NGPUS][MAX_STREAMS])
{
const int ngpus_local = ngpus;
if(offset_r == 0 && offset_c == 0)
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_cgemv_mgpu_driver(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, stream[i][0]);
}
}
else
{
for(int i = 0; i < ngpus_local; i++)
{
cudaSetDevice(gpu_lid[i]);
kblas_cgemv_mgpu_driver_offset(trans, rows, cols, alpha, dA[i], lda, dX[i], incx, beta, dY[i], incy, gpu_gid[i], ngpus, offset_r, offset_c);
}
}
return 0;
}
/*************************************************************************************/
extern "C"
int get_cgemv_mgpu_bs(char trans)
{
if(trans == 'n' || trans == 'N')
return cgemvn_mgpu_bs;
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
return cgemvt_mgpu_bs;
else
{printf("Error .. input %c is not supported for gemv \n", trans); return -1;}
}
|
2e565ee7b6b849e51211ba9ad5e688a35ce71e1f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nvcc main_cuda_atom.cu -o main_cuda_atom
#include <stdio.h>
#include <sys/time.h>
long get_millisec(timeval &s, timeval &e){
long seconds = e.tv_sec - s.tv_sec; //seconds
long useconds = e.tv_usec - s.tv_usec; //milliseconds
return ((seconds) * 1000 + useconds/1000.0);
}
struct timeval start, eend;
#define STEPS 2000000000
#define BLOCKS 256
#define THREADS 256
int threadidx;
// Kernel
__global__ void pi_calculation(float* pi, int nsteps, float base, int nthreads, int nblocks)
{
int i;
float x;
int idx = blockIdx.x * blockDim.x + threadIdx.x; // Calculate index for each thread
float acum = 0;
for (i = idx; i < nsteps; i += nthreads * nblocks)
{
x = (i + 0.5) * base;
acum += 4.0 / (1.0 + x * x); //Save result to device memory
}
atomicAdd(pi, acum);
}
int main(void)
{
gettimeofday(&start, NULL);
dim3 dimGrid(BLOCKS, 1, 1); // Grid dimensions
dim3 dimBlock(THREADS, 1, 1); // Block dimensions
float base = 1.0 / STEPS; // base size
// Launch Kernel
float* pi;
hipMallocManaged(&pi, sizeof(float));
pi_calculation << <dimGrid, dimBlock >> > (pi, STEPS, base, THREADS, BLOCKS);
// Sync
hipDeviceSynchronize();
// Multiply by base
*pi = (*pi) * base;
// Output Results
gettimeofday(&eend, NULL);
printf("Result = %20.18lf (%ld)\n", *pi, get_millisec(start, eend));
hipFree(pi);
gettimeofday(&eend, NULL);
return 0;
}
|
2e565ee7b6b849e51211ba9ad5e688a35ce71e1f.cu
|
// nvcc main_cuda_atom.cu -o main_cuda_atom
#include <stdio.h>
#include <sys/time.h>
long get_millisec(timeval &s, timeval &e){
long seconds = e.tv_sec - s.tv_sec; //seconds
long useconds = e.tv_usec - s.tv_usec; //microseconds
return ((seconds) * 1000 + useconds/1000.0);
}
struct timeval start, eend;
#define STEPS 2000000000
#define BLOCKS 256
#define THREADS 256
int threadidx;
// Kernel
__global__ void pi_calculation(float* pi, int nsteps, float base, int nthreads, int nblocks)
{
int i;
float x;
int idx = blockIdx.x * blockDim.x + threadIdx.x; // Calculate index for each thread
float acum = 0;
for (i = idx; i < nsteps; i += nthreads * nblocks)
{
x = (i + 0.5) * base;
acum += 4.0 / (1.0 + x * x); //Save result to device memory
}
atomicAdd(pi, acum);
}
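// The kernel above is a midpoint-rule evaluation of pi = integral from 0 to 1 of 4/(1+x^2) dx:
// each thread sums f(x_i) at the midpoints x_i = (i + 0.5)/STEPS over its grid-stride slice,
// atomically accumulates its partial sum into *pi, and main() scales the total by the step
// width (base) to obtain the approximation of pi.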
int main(void)
{
gettimeofday(&start, NULL);
dim3 dimGrid(BLOCKS, 1, 1); // Grid dimensions
dim3 dimBlock(THREADS, 1, 1); // Block dimensions
float base = 1.0 / STEPS; // base size
// Launch Kernel
float* pi;
cudaMallocManaged(&pi, sizeof(float));
pi_calculation << <dimGrid, dimBlock >> > (pi, STEPS, base, THREADS, BLOCKS);
// Sync
cudaDeviceSynchronize();
// Multiply by base
*pi = (*pi) * base;
// Output Results
gettimeofday(&eend, NULL);
printf("Result = %20.18lf (%ld)\n", *pi, get_millisec(start, eend));
cudaFree(pi);
gettimeofday(&eend, NULL);
return 0;
}
|
interleaver.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
% Function: interleaver
% Description: Interleaves ULSCH data with RI and ACK control information
% Inputs: input_h Input bits
% ri_h RI control bits to interleave
% ack_h ACK control bits to interleave
% N_l Number of layers
% Qm Number of bits per modulation symbol
% Outputs: *output_h Output bits
By: Ahmad Nour
*/
#include "interleaver.cuh"
__global__ void initializeMatricies(Byte* y_idx_d, Byte* y_mat_d, int N_idx, int N_mat)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//initialize matrices
//Not to run more threads than available data
if (idx >= N_mat)
return;
if (idx < N_idx)
{
y_idx_d[idx] = 100;
y_mat_d[idx] = 0;
}
else
{
y_mat_d[idx] = 0;
}
}
__global__ void interleaveRI(Byte* y_idx_d, Byte* y_mat_d, Byte* ri_d, int R_prime_mux, int N_ri_bits)
{
int col = threadIdx.x;
int row = blockIdx.y;
int idx = row * blockDim.x + col;
int C_mux = 12;
int Ncol = blockDim.x;
//Not to run more threads than available data
if (row >= N_ri_bits)
return;
Byte ri_column_set[4] = { 1, 10, 7, 4 };
//Byte ack_column_set[4] = { 2, 9, 8, 3 };
int r = R_prime_mux - 1 - (row / 4);
int C_ri = ri_column_set[(row % 4)];
y_idx_d[r*C_mux + C_ri] = 1;
y_mat_d[C_mux*r*Ncol + C_ri*Ncol + col] = ri_d[row * Ncol + col];
}
__global__ void interleaveData(Byte* y_idx_d, Byte* y_mat_d, Byte* input_d, int numThreads, int H_prime_total, int N_ri_bits, int Qm, int N_l)
{
const int Ncol = blockDim.x; //Total number of columns
int col = threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int idx = row * Ncol + col;
const int C_mux = 12;
//Not to run more threads than available data
if (row >= numThreads)
return;
int firstRI_row = H_prime_total - (N_ri_bits * 3); // The original eqn:
// firstRI_row = ((H_prime_total/12) - (N_ri_bits / 4))*12
if (row < firstRI_row) //No RI bits in this range
{
y_idx_d[row] = 1;
y_mat_d[row*(Qm*N_l) + col] = input_d[row * (Qm*N_l) + col];
}
else
{
/*
Now, we reshape the matrix to be of (12 cols):
idx 0 1 2 3 4 5 6 7 8 9 10 11
Data can be put? (No RI) yes no yes yes no yes yes no yes yes no yes
So, to map the data to indices where no RI bits exist, this equation is applied:
col = col + (col / 2) + (col % 2);
*/
int old_mapping = (row - firstRI_row);
int new_mapping = old_mapping + (old_mapping / 2) + (old_mapping % 2);
int new_row = row + (new_mapping - old_mapping);
y_idx_d[new_row] = 1;
y_mat_d[new_row*(Qm*N_l) + col] = input_d[row * (Qm*N_l) + col];
}
}
__global__ void serialOut(Byte* output_d, Byte* y_mat_d, int Nrows, int Qm, int N_l) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z + threadIdx.z;
int idx = y * blockDim.x + x + z * (Nrows * blockDim.x);
const int C_mux = 12;
//Not to run more threads than available data
if (y >= Nrows)
return;
output_d[idx] = y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x];
}
void interleaver(Byte* input_d, Byte* ri_d, Byte** output_d, const int N, const int N_ri, const int Qm, const int N_l, Byte* y_idx_d, Byte *y_mat_d)
{
//Reshape parameters
int data_vec_len = Qm*N_l;
int ri_vec_len = Qm*N_l;
int N_data_bits = N / data_vec_len;
int N_ri_bits = N_ri / data_vec_len;
//Interleaver
// Step 1: Define C_mux
int C_mux = N_pusch_symbs;
//Step 2: Define R_mux and R_prime_mux
int H_prime = N_data_bits;
int H_vec_len = data_vec_len;
int H_prime_total = H_prime + N_ri_bits;
int R_mux = (H_prime_total*Qm*N_l) / C_mux;
int R_prime_mux = R_mux / (Qm*N_l);
//Calc. number of needed threads for calling kernel(s)
int numThreads = (C_mux*R_mux);
int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 threads)
int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); //grid size in blocks (min 1)
//Calling the kernel(s)
initializeMatricies << <gridDim, blockDim , 0, stream_default>> > (y_idx_d, y_mat_d, (C_mux*R_prime_mux), (C_mux*R_mux));
// Step 3: Interleave the RI control bits
if (N_ri != 0)
{
//Calc. number of needed threads for calling kernel(s)
numThreads = N_ri_bits;
int rows = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 threads)
dim3 blockDim( Qm*N_l,1 );
dim3 gridDim( 1,rows);
interleaveRI << <gridDim, blockDim , 0, stream_default>> > (y_idx_d, y_mat_d, ri_d, R_prime_mux, numThreads);
}
// Step 4: Interleave the data bits
//Calc. number of needed threads for calling kernel(s)
numThreads = H_prime; //Actually, it's number of required rows or it's total_threads / (Qm*N_l)
int rows = (numThreads < (1024/ (Qm*N_l))) ? numThreads : (1024/ (Qm*N_l));
int gridY = numThreads / (rows)+(numThreads % rows == 0 ? 0 : 1); //grid size in blocks (min 1)
dim3 blockDim_2(Qm*N_l, rows);
dim3 gridDim_2(1, gridY);
interleaveData << <gridDim_2, blockDim_2 , 0, stream_default>> >(y_idx_d, y_mat_d, input_d, numThreads, H_prime_total, N_ri_bits, Qm, N_l);
// Step 6: Read out the bits
//Calc. number of needed threads for calling kernel(s)
numThreads = C_mux * R_prime_mux * H_vec_len;
rows = (numThreads < (1024)) ? numThreads : (1024 / (C_mux*H_vec_len));
gridY = numThreads / (rows*(C_mux*H_vec_len)) + (numThreads % (rows*(C_mux*H_vec_len)) == 0 ? 0 : 1); //grid size in blocks (min 1)
dim3 blockDim_3(H_vec_len, rows, C_mux);
dim3 gridDim_3(1,gridY);
serialOut << <gridDim_3, blockDim_3 , 0, stream_default>> >(*output_d, y_mat_d, R_prime_mux, Qm, N_l);
}
|
interleaver.cu
|
/*
% Function: interleaver
% Description: Interleaves ULSCH data with RI and ACK control information
% Inputs: input_h Input bits
% ri_h RI control bits to interleave
% ack_h ACK control bits to interleave
% N_l Number of layers
% Qm Number of bits per modulation symbol
% Outputs: *output_h Output bits
By: Ahmad Nour
*/
#include "interleaver.cuh"
__global__ void initializeMatricies(Byte* y_idx_d, Byte* y_mat_d, int N_idx, int N_mat)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//initialize matrices
//Not to run more threads than available data
if (idx >= N_mat)
return;
if (idx < N_idx)
{
y_idx_d[idx] = 100;
y_mat_d[idx] = 0;
}
else
{
y_mat_d[idx] = 0;
}
}
__global__ void interleaveRI(Byte* y_idx_d, Byte* y_mat_d, Byte* ri_d, int R_prime_mux, int N_ri_bits)
{
int col = threadIdx.x;
int row = blockIdx.y;
int idx = row * blockDim.x + col;
int C_mux = 12;
int Ncol = blockDim.x;
//Not to run more threads than available data
if (row >= N_ri_bits)
return;
Byte ri_column_set[4] = { 1, 10, 7, 4 };
//Byte ack_column_set[4] = { 2, 9, 8, 3 };
int r = R_prime_mux - 1 - (row / 4);
int C_ri = ri_column_set[(row % 4)];
y_idx_d[r*C_mux + C_ri] = 1;
y_mat_d[C_mux*r*Ncol + C_ri*Ncol + col] = ri_d[row * Ncol + col];
}
__global__ void interleaveData(Byte* y_idx_d, Byte* y_mat_d, Byte* input_d, int numThreads, int H_prime_total, int N_ri_bits, int Qm, int N_l)
{
const int Ncol = blockDim.x; //Total number of columns
int col = threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int idx = row * Ncol + col;
const int C_mux = 12;
//Not to run more threads than available data
if (row >= numThreads)
return;
int firstRI_row = H_prime_total - (N_ri_bits * 3); // The original eqn:
// firstRI_row = ((H_prime_total/12) - (N_ri_bits / 4))*12
if (row < firstRI_row) //No RI bits in this range
{
y_idx_d[row] = 1;
y_mat_d[row*(Qm*N_l) + col] = input_d[row * (Qm*N_l) + col];
}
else
{
/*
Now, we reshape the matrix to be of (12 cols):
idx 0 1 2 3 4 5 6 7 8 9 10 11
Data can be put? (No RI) yes no yes yes no yes yes no yes yes no yes
So, to map the data to indices where no RI bits exist, this equation is applied:
col = col + (col / 2) + (col % 2);
*/
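// Worked example of the mapping above (old index -> new index):
//   0->0, 1->2, 2->3, 3->5, 4->6, 5->8, 6->9, 7->11
// i.e. the remapped rows land only on the indices that carry no RI bits.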
int old_mapping = (row - firstRI_row);
int new_mapping = old_mapping + (old_mapping / 2) + (old_mapping % 2);
int new_row = row + (new_mapping - old_mapping);
y_idx_d[new_row] = 1;
y_mat_d[new_row*(Qm*N_l) + col] = input_d[row * (Qm*N_l) + col];
}
}
__global__ void serialOut(Byte* output_d, Byte* y_mat_d, int Nrows, int Qm, int N_l) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int z = blockDim.z * blockIdx.z + threadIdx.z;
int idx = y * blockDim.x + x + z * (Nrows * blockDim.x);
const int C_mux = 12;
//Not to run more threads than available data
if (y >= Nrows)
return;
output_d[idx] = y_mat_d[y*C_mux*Qm*N_l + z*Qm*N_l + x];
}
void interleaver(Byte* input_d, Byte* ri_d, Byte** output_d, const int N, const int N_ri, const int Qm, const int N_l, Byte* y_idx_d, Byte *y_mat_d)
{
//Reshape parameters
int data_vec_len = Qm*N_l;
int ri_vec_len = Qm*N_l;
int N_data_bits = N / data_vec_len;
int N_ri_bits = N_ri / data_vec_len;
//Interleaver
// Step 1: Define C_mux
int C_mux = N_pusch_symbs;
//Step 2: Define R_mux and R_prime_mux
int H_prime = N_data_bits;
int H_vec_len = data_vec_len;
int H_prime_total = H_prime + N_ri_bits;
int R_mux = (H_prime_total*Qm*N_l) / C_mux;
int R_prime_mux = R_mux / (Qm*N_l);
//Calc. number of needed threads for calling kernel(s)
int numThreads = (C_mux*R_mux);
int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 threads)
int gridDim = numThreads / (blockDim)+(numThreads % blockDim == 0 ? 0 : 1); //grid size in blocks (min 1)
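// The gridDim expression above is an integer ceiling division; e.g. (hypothetical numbers)
// numThreads = 2500 with a 1024-thread block gives gridDim = 2500/1024 + 1 = 3 blocks.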
//Calling the kernel(s)
initializeMatricies << <gridDim, blockDim , 0, stream_default>> > (y_idx_d, y_mat_d, (C_mux*R_prime_mux), (C_mux*R_mux));
// Step 3: Interleave the RI control bits
if (N_ri != 0)
{
//Calc. number of needed threads for calling kernel(s)
numThreads = N_ri_bits;
int rows = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 threads)
dim3 blockDim( Qm*N_l,1 );
dim3 gridDim( 1,rows);
interleaveRI << <gridDim, blockDim , 0, stream_default>> > (y_idx_d, y_mat_d, ri_d, R_prime_mux, numThreads);
}
// Step 4: Interleave the data bits
//Calc. number of needed threads for calling kernel(s)
numThreads = H_prime; //Actually, it's number of required rows or it's total_threads / (Qm*N_l)
int rows = (numThreads < (1024/ (Qm*N_l))) ? numThreads : (1024/ (Qm*N_l));
int gridY = numThreads / (rows)+(numThreads % rows == 0 ? 0 : 1); //grid size in blocks (min 1)
dim3 blockDim_2(Qm*N_l, rows);
dim3 gridDim_2(1, gridY);
interleaveData << <gridDim_2, blockDim_2 , 0, stream_default>> >(y_idx_d, y_mat_d, input_d, numThreads, H_prime_total, N_ri_bits, Qm, N_l);
// Step 6: Read out the bits
//Calc. number of needed threads for calling kernel(s)
numThreads = C_mux * R_prime_mux * H_vec_len;
rows = (numThreads < (1024)) ? numThreads : (1024 / (C_mux*H_vec_len));
gridY = numThreads / (rows*(C_mux*H_vec_len)) + (numThreads % (rows*(C_mux*H_vec_len)) == 0 ? 0 : 1); //grid size in blocks (min 1)
dim3 blockDim_3(H_vec_len, rows, C_mux);
dim3 gridDim_3(1,gridY);
serialOut << <gridDim_3, blockDim_3 , 0, stream_default>> >(*output_d, y_mat_d, R_prime_mux, Qm, N_l);
}
|
fc4bec8567b3bdbf9990370f3e5ccd5435f962a2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "gpuinfo.h"
#include <hip/hip_runtime.h>
#define BUF_BYTES 256
GPUInfo* CreateGPUInfo()
{
return new GPUInfo;
}
GPUInfo::GPUInfo()
{
hipError_t error_id = hipGetDeviceCount(&m_nGPUS);
if(error_id != hipSuccess)
{
m_nGPUS = 0;
}
}
std::vector<string> GPUInfo::GetGPUProps(uint32_t idx)
{
char buf[BUF_BYTES];
// TODO: verify index is in bounds
vector<string> strlist;
snprintf(buf, BUF_BYTES, "GPU index %d:", idx);
strlist.push_back(buf);
#if 1
int32_t deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess)
{
snprintf(buf, BUF_BYTES, "cuda error obtaining device count: %s", hipGetErrorString(error_id));
strlist.push_back(buf);
return strlist;
}
if((int32_t)idx > deviceCount - 1 )
{
snprintf(buf, BUF_BYTES, "Invalid index %d. cuda device count: %d", idx, deviceCount);
strlist.push_back(buf);
return strlist;
}
int32_t dev = idx;
int32_t driverVersion = 0, runtimeVersion = 0;
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
snprintf(buf, BUF_BYTES, "\nDevice %d: \"%s\"\n", dev, deviceProp.name);
strlist.push_back(buf);
// Console log
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
snprintf(buf, BUF_BYTES, " CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
strlist.push_back(buf);
// snprintf(buf, BUF_BYTES, " (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
// deviceProp.multiProcessorCount,
// _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
// _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
// strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
strlist.push_back(buf);
#if CUDART_VERSION >= 5000
// This is supported in CUDA 5.0 (runtime API device properties)
snprintf(buf, BUF_BYTES, " Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth);
strlist.push_back(buf);
if (deviceProp.l2CacheSize)
{
snprintf(buf, BUF_BYTES, " L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize);
strlist.push_back(buf);
}
#else
// This is only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API)
int memoryClock;
getCudaAttribute<int>(&memoryClock, hipDeviceAttributeMemoryClockRate, dev);
snprintf(buf, BUF_BYTES, " Memory Clock rate: %.0f Mhz\n", memoryClock * 1e-3f);
strlist.push_back(buf);
int memBusWidth;
getCudaAttribute<int>(&memBusWidth, hipDeviceAttributeMemoryBusWidth, dev);
snprintf(buf, BUF_BYTES, " Memory Bus Width: %d-bit\n", memBusWidth);
strlist.push_back(buf);
int L2CacheSize;
getCudaAttribute<int>(&L2CacheSize, hipDeviceAttributeL2CacheSize, dev);
if (L2CacheSize)
{
snprintf(buf, BUF_BYTES, " L2 Cache Size: %d bytes\n", L2CacheSize);
strlist.push_back(buf);
}
#endif
snprintf(buf, BUF_BYTES, " Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",
deviceProp.maxTexture1D , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n",
deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n",
deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Warp size: %d\n", deviceProp.warpSize);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Texture alignment: %lu bytes\n", deviceProp.textureAlignment);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled");
strlist.push_back(buf);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
snprintf(buf, BUF_BYTES, " CUDA Device Driver Mode (TCC or WDDM): %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)");
strlist.push_back(buf);
#endif
snprintf(buf, BUF_BYTES, " Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID);
strlist.push_back(buf);
const char *sComputeMode[] =
{
"Default (multiple host threads can use ::hipSetDevice() with device simultaneously)",
"Exclusive (only one host thread in one process is able to use ::hipSetDevice() with this device)",
"Prohibited (no host thread can use ::hipSetDevice() with this device)",
"Exclusive Process (many threads in one process is able to use ::hipSetDevice() with this device)",
"Unknown",
NULL
};
snprintf(buf, BUF_BYTES, " Compute Mode:\n");
strlist.push_back(buf);
if( 6 > deviceProp.computeMode)
{
snprintf(buf, BUF_BYTES, " < %s >\n", sComputeMode[deviceProp.computeMode]);
strlist.push_back(buf);
}
else
{
snprintf(buf, BUF_BYTES, " Unexpected computeMode %d\n",deviceProp.computeMode);
strlist.push_back(buf);
}
// If there are 2 or more GP
//hipDeviceProp_t prop[64];
//checkCudaErrors(hipGetDeviceProperties(&prop[dev], dev));
// Only boards based on Fermi or later can support P2P
if ((deviceProp.major >= 2)
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// on Windows (64-bit), the Tesla Compute Cluster driver for windows must be enabled to support this
&& deviceProp.tccDriver
#endif
)
{
// This is an array of P2P capable GPUs
strlist.push_back("Peer to peer access capable!");
}
// csv masterlog info
// *****************************
// exe and CUDA driver name
snprintf(buf, BUF_BYTES, "\n");
std::string sProfileString = "deviceQuery, CUDA Driver = CUDART";
char cTemp[16];
// driver version
sProfileString += ", CUDA Driver Version = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
snprintf(cTemp, 16, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#else
snprintf(cTemp, 16, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#endif
sProfileString += cTemp;
// Runtime version
sProfileString += ", CUDA Runtime Version = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
snprintf_s(cTemp, 16, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#else
snprintf(buf, BUF_BYTES, cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#endif
sProfileString += cTemp;
// Device count
sProfileString += ", NumDevs = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(cTemp, "%d", deviceCount);
#else
snprintf(buf, BUF_BYTES, cTemp, "%d", deviceCount);
#endif
sProfileString += cTemp;
sProfileString += "\n";
strlist.push_back(sProfileString);
#endif
return strlist;
}
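// Allocates nMB mebibytes (nMB * 0x00100000 bytes) of device memory and caches the
// pointer in m_dvcBufPtr; returns nullptr if the allocation fails.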
void* GPUInfo::AllocMem(uint32_t nMB)
{
hipError_t ret = hipMalloc(&m_dvcBufPtr, nMB * 0x00100000);
if(hipSuccess != ret)
{
m_dvcBufPtr = nullptr;
}
return m_dvcBufPtr;
}
|
fc4bec8567b3bdbf9990370f3e5ccd5435f962a2.cu
|
#include "gpuinfo.h"
#include <cuda_runtime.h>
#define BUF_BYTES 256
GPUInfo* CreateGPUInfo()
{
return new GPUInfo;
}
GPUInfo::GPUInfo()
{
cudaError_t error_id = cudaGetDeviceCount(&m_nGPUS);
if(error_id != cudaSuccess)
{
m_nGPUS = 0;
}
}
std::vector<string> GPUInfo::GetGPUProps(uint32_t idx)
{
char buf[BUF_BYTES];
// TODO: verify index is in bounds
vector<string> strlist;
snprintf(buf, BUF_BYTES, "GPU index %d:", idx);
strlist.push_back(buf);
#if 1
int32_t deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess)
{
snprintf(buf, BUF_BYTES, "cuda error obtaining device count: %s", cudaGetErrorString(error_id));
strlist.push_back(buf);
return strlist;
}
if((int32_t)idx > deviceCount - 1 )
{
snprintf(buf, BUF_BYTES, "Invalid index %d. cuda device count: %d", idx, deviceCount);
strlist.push_back(buf);
return strlist;
}
int32_t dev = idx;
int32_t driverVersion = 0, runtimeVersion = 0;
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
snprintf(buf, BUF_BYTES, "\nDevice %d: \"%s\"\n", dev, deviceProp.name);
strlist.push_back(buf);
// Console log
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
snprintf(buf, BUF_BYTES, " CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
strlist.push_back(buf);
// snprintf(buf, BUF_BYTES, " (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
// deviceProp.multiProcessorCount,
// _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
// _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
// strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
strlist.push_back(buf);
#if CUDART_VERSION >= 5000
// This is supported in CUDA 5.0 (runtime API device properties)
snprintf(buf, BUF_BYTES, " Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth);
strlist.push_back(buf);
if (deviceProp.l2CacheSize)
{
snprintf(buf, BUF_BYTES, " L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize);
strlist.push_back(buf);
}
#else
// This is only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API)
int memoryClock;
getCudaAttribute<int>(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev);
snprintf(buf, BUF_BYTES, " Memory Clock rate: %.0f Mhz\n", memoryClock * 1e-3f);
strlist.push_back(buf);
int memBusWidth;
getCudaAttribute<int>(&memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev);
snprintf(buf, BUF_BYTES, " Memory Bus Width: %d-bit\n", memBusWidth);
strlist.push_back(buf);
int L2CacheSize;
getCudaAttribute<int>(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev);
if (L2CacheSize)
{
snprintf(buf, BUF_BYTES, " L2 Cache Size: %d bytes\n", L2CacheSize);
strlist.push_back(buf);
}
#endif
snprintf(buf, BUF_BYTES, " Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",
deviceProp.maxTexture1D , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n",
deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n",
deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Warp size: %d\n", deviceProp.warpSize);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Texture alignment: %lu bytes\n", deviceProp.textureAlignment);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled");
strlist.push_back(buf);
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
snprintf(buf, BUF_BYTES, " CUDA Device Driver Mode (TCC or WDDM): %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)");
strlist.push_back(buf);
#endif
snprintf(buf, BUF_BYTES, " Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No");
strlist.push_back(buf);
snprintf(buf, BUF_BYTES, " Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID);
strlist.push_back(buf);
const char *sComputeMode[] =
{
"Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",
"Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",
"Prohibited (no host thread can use ::cudaSetDevice() with this device)",
"Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",
"Unknown",
NULL
};
snprintf(buf, BUF_BYTES, " Compute Mode:\n");
strlist.push_back(buf);
if( 6 > deviceProp.computeMode)
{
snprintf(buf, BUF_BYTES, " < %s >\n", sComputeMode[deviceProp.computeMode]);
strlist.push_back(buf);
}
else
{
snprintf(buf, BUF_BYTES, " Unexpected computeMode %d\n",deviceProp.computeMode);
strlist.push_back(buf);
}
// If there are 2 or more GP
//cudaDeviceProp prop[64];
//checkCudaErrors(cudaGetDeviceProperties(&prop[dev], dev));
// Only boards based on Fermi or later can support P2P
if ((deviceProp.major >= 2)
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// on Windows (64-bit), the Tesla Compute Cluster driver for windows must be enabled to support this
&& deviceProp.tccDriver
#endif
)
{
// This is an array of P2P capable GPUs
strlist.push_back("Peer to peer access capable!");
}
// csv masterlog info
// *****************************
// exe and CUDA driver name
snprintf(buf, BUF_BYTES, "\n");
std::string sProfileString = "deviceQuery, CUDA Driver = CUDART";
char cTemp[16];
// driver version
sProfileString += ", CUDA Driver Version = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
snprintf(cTemp, 16, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#else
snprintf(cTemp, 16, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#endif
sProfileString += cTemp;
// Runtime version
sProfileString += ", CUDA Runtime Version = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
snprintf_s(cTemp, 16, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#else
snprintf(buf, BUF_BYTES, cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#endif
sProfileString += cTemp;
// Device count
sProfileString += ", NumDevs = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(cTemp, "%d", deviceCount);
#else
snprintf(buf, BUF_BYTES, cTemp, "%d", deviceCount);
#endif
sProfileString += cTemp;
sProfileString += "\n";
strlist.push_back(sProfileString);
#endif
return strlist;
}
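// Allocates nMB mebibytes (nMB * 0x00100000 bytes) of device memory and caches the
// pointer in m_dvcBufPtr; returns nullptr if the allocation fails.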
void* GPUInfo::AllocMem(uint32_t nMB)
{
cudaError_t ret = cudaMalloc(&m_dvcBufPtr, nMB * 0x00100000);
if(cudaSuccess != ret)
{
m_dvcBufPtr = nullptr;
}
return m_dvcBufPtr;
}
|
62557ea91276b7defd9a339c88fc7d2950b703be.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <groupby/sort/group_single_pass_reduction_util.cuh>
#include <cudf/detail/gather.hpp>
#include <thrust/transform.h>
namespace cudf {
namespace experimental {
namespace groupby {
namespace detail {
std::unique_ptr<column> group_argmax(
column_view const& values,
size_type num_groups,
rmm::device_vector<size_type> const& group_labels,
column_view const& key_sort_order,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto indices = type_dispatcher(
values.type(), reduce_functor<aggregation::ARGMAX>{},
values, num_groups, group_labels,
rmm::mr::get_default_resource(), stream);
// The functor returns the index of maximum in the sorted values.
// We need the index of maximum in the original unsorted values.
// So use indices to gather the sort order used to sort `values`.
// Gather map cannot be null so we make a view with the mask removed.
// The values in data buffer of indices corresponding to null values was
// initialized to ARGMAX_SENTINEL which is an out of bounds index value (-1)
// and causes the gathered value to be null.
column_view null_removed_indices(data_type(type_to_id<size_type>()),
indices->size(),
static_cast<void const*>(indices->view().template data<size_type>()));
auto result_table = cudf::experimental::detail::gather(
table_view({key_sort_order}), null_removed_indices,
false, indices->nullable(), false,
mr, stream);
return std::move(result_table->release()[0]);
}
} // namespace detail
} // namespace groupby
} // namespace experimental
} // namespace cudf
|
62557ea91276b7defd9a339c88fc7d2950b703be.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <groupby/sort/group_single_pass_reduction_util.cuh>
#include <cudf/detail/gather.hpp>
#include <thrust/transform.h>
namespace cudf {
namespace experimental {
namespace groupby {
namespace detail {
std::unique_ptr<column> group_argmax(
column_view const& values,
size_type num_groups,
rmm::device_vector<size_type> const& group_labels,
column_view const& key_sort_order,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto indices = type_dispatcher(
values.type(), reduce_functor<aggregation::ARGMAX>{},
values, num_groups, group_labels,
rmm::mr::get_default_resource(), stream);
// The functor returns the index of maximum in the sorted values.
// We need the index of maximum in the original unsorted values.
// So use indices to gather the sort order used to sort `values`.
// Gather map cannot be null so we make a view with the mask removed.
// The values in data buffer of indices corresponding to null values was
// initialized to ARGMAX_SENTINEL which is an out of bounds index value (-1)
// and causes the gathered value to be null.
column_view null_removed_indices(data_type(type_to_id<size_type>()),
indices->size(),
static_cast<void const*>(indices->view().template data<size_type>()));
auto result_table = cudf::experimental::detail::gather(
table_view({key_sort_order}), null_removed_indices,
false, indices->nullable(), false,
mr, stream);
return std::move(result_table->release()[0]);
}
} // namespace detail
} // namespace groupby
} // namespace experimental
} // namespace cudf
|
fba43dbc4899b84efcfbdffacae95471caf6ac1c.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/manifold/tsne.h>
#include <common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
#include "distances.cuh"
#include "exact_kernels.cuh"
#include "utils.cuh"
#include "barnes_hut.cuh"
#include "exact_tsne.cuh"
namespace ML {
void TSNE_fit(const raft::handle_t &handle, const float *X, float *Y,
const int n, const int p, const int dim, int n_neighbors,
const float theta, const float epssq, float perplexity,
const int perplexity_max_iter, const float perplexity_tol,
const float early_exaggeration, const int exaggeration_iter,
const float min_gain, const float pre_learning_rate,
const float post_learning_rate, const int max_iter,
const float min_grad_norm, const float pre_momentum,
const float post_momentum, const long long random_state,
int verbosity, const bool intialize_embeddings, bool barnes_hut) {
ASSERT(n > 0 && p > 0 && dim > 0 && n_neighbors > 0 && X != NULL && Y != NULL,
"Wrong input args");
ML::Logger::get().setLevel(verbosity);
if (dim > 2 and barnes_hut) {
barnes_hut = false;
CUML_LOG_WARN(
"Barnes Hut only works for dim == 2. Switching to exact solution.");
}
if (n_neighbors > n) n_neighbors = n;
if (n_neighbors > 1023) {
CUML_LOG_WARN("FAISS only supports maximum n_neighbors = 1023.");
n_neighbors = 1023;
}
// Perplexity must be less than number of datapoints
// "How to Use t-SNE Effectively" https://distill.pub/2016/misread-tsne/
if (perplexity > n) perplexity = n;
CUML_LOG_DEBUG("Data size = (%d, %d) with dim = %d perplexity = %f", n, p,
dim, perplexity);
if (perplexity < 5 or perplexity > 50)
CUML_LOG_WARN(
"Perplexity should be within ranges (5, 50). Your results might be a"
" bit strange...");
if (n_neighbors < perplexity * 3.0f)
CUML_LOG_WARN(
"# of Nearest Neighbors should be at least 3 * perplexity. Your results"
" might be a bit strange...");
auto d_alloc = handle.get_device_allocator();
hipStream_t stream = handle.get_stream();
START_TIMER;
//---------------------------------------------------
// Get distances
CUML_LOG_DEBUG("Getting distances.");
MLCommon::device_buffer<float> distances(d_alloc, stream, n * n_neighbors);
MLCommon::device_buffer<long> indices(d_alloc, stream, n * n_neighbors);
TSNE::get_distances(X, n, p, indices.data(), distances.data(), n_neighbors,
d_alloc, stream);
//---------------------------------------------------
END_TIMER(DistancesTime);
START_TIMER;
//---------------------------------------------------
// Normalize distances
CUML_LOG_DEBUG("Now normalizing distances so exp(D) doesn't explode.");
TSNE::normalize_distances(n, distances.data(), n_neighbors, stream);
//---------------------------------------------------
END_TIMER(NormalizeTime);
START_TIMER;
//---------------------------------------------------
// Optimal perplexity
CUML_LOG_DEBUG("Searching for optimal perplexity via bisection search.");
MLCommon::device_buffer<float> P(d_alloc, stream, n * n_neighbors);
TSNE::perplexity_search(distances.data(), P.data(), perplexity,
perplexity_max_iter, perplexity_tol, n, n_neighbors,
handle);
distances.release(stream);
//---------------------------------------------------
END_TIMER(PerplexityTime);
START_TIMER;
//---------------------------------------------------
// Convert data to COO layout
MLCommon::Sparse::COO<float> COO_Matrix(d_alloc, stream);
TSNE::symmetrize_perplexity(P.data(), indices.data(), n, n_neighbors,
early_exaggeration, &COO_Matrix, stream, handle);
P.release(stream);
indices.release(stream);
const int NNZ = COO_Matrix.nnz;
float *VAL = COO_Matrix.vals();
const int *COL = COO_Matrix.cols();
const int *ROW = COO_Matrix.rows();
//---------------------------------------------------
END_TIMER(SymmetrizeTime);
if (barnes_hut) {
TSNE::Barnes_Hut(VAL, COL, ROW, NNZ, handle, Y, n, theta, epssq,
early_exaggeration, exaggeration_iter, min_gain,
pre_learning_rate, post_learning_rate, max_iter,
min_grad_norm, pre_momentum, post_momentum, random_state);
} else {
TSNE::Exact_TSNE(VAL, COL, ROW, NNZ, handle, Y, n, dim, early_exaggeration,
exaggeration_iter, min_gain, pre_learning_rate,
post_learning_rate, max_iter, min_grad_norm, pre_momentum,
post_momentum, random_state, intialize_embeddings);
}
}
} // namespace ML
|
fba43dbc4899b84efcfbdffacae95471caf6ac1c.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/manifold/tsne.h>
#include <common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
#include "distances.cuh"
#include "exact_kernels.cuh"
#include "utils.cuh"
#include "barnes_hut.cuh"
#include "exact_tsne.cuh"
namespace ML {
void TSNE_fit(const raft::handle_t &handle, const float *X, float *Y,
const int n, const int p, const int dim, int n_neighbors,
const float theta, const float epssq, float perplexity,
const int perplexity_max_iter, const float perplexity_tol,
const float early_exaggeration, const int exaggeration_iter,
const float min_gain, const float pre_learning_rate,
const float post_learning_rate, const int max_iter,
const float min_grad_norm, const float pre_momentum,
const float post_momentum, const long long random_state,
int verbosity, const bool intialize_embeddings, bool barnes_hut) {
ASSERT(n > 0 && p > 0 && dim > 0 && n_neighbors > 0 && X != NULL && Y != NULL,
"Wrong input args");
ML::Logger::get().setLevel(verbosity);
if (dim > 2 and barnes_hut) {
barnes_hut = false;
CUML_LOG_WARN(
"Barnes Hut only works for dim == 2. Switching to exact solution.");
}
if (n_neighbors > n) n_neighbors = n;
if (n_neighbors > 1023) {
CUML_LOG_WARN("FAISS only supports maximum n_neighbors = 1023.");
n_neighbors = 1023;
}
// Perplexity must be less than number of datapoints
// "How to Use t-SNE Effectively" https://distill.pub/2016/misread-tsne/
if (perplexity > n) perplexity = n;
CUML_LOG_DEBUG("Data size = (%d, %d) with dim = %d perplexity = %f", n, p,
dim, perplexity);
if (perplexity < 5 or perplexity > 50)
CUML_LOG_WARN(
"Perplexity should be within ranges (5, 50). Your results might be a"
" bit strange...");
if (n_neighbors < perplexity * 3.0f)
CUML_LOG_WARN(
"# of Nearest Neighbors should be at least 3 * perplexity. Your results"
" might be a bit strange...");
auto d_alloc = handle.get_device_allocator();
cudaStream_t stream = handle.get_stream();
START_TIMER;
//---------------------------------------------------
// Get distances
CUML_LOG_DEBUG("Getting distances.");
MLCommon::device_buffer<float> distances(d_alloc, stream, n * n_neighbors);
MLCommon::device_buffer<long> indices(d_alloc, stream, n * n_neighbors);
TSNE::get_distances(X, n, p, indices.data(), distances.data(), n_neighbors,
d_alloc, stream);
//---------------------------------------------------
END_TIMER(DistancesTime);
START_TIMER;
//---------------------------------------------------
// Normalize distances
CUML_LOG_DEBUG("Now normalizing distances so exp(D) doesn't explode.");
TSNE::normalize_distances(n, distances.data(), n_neighbors, stream);
//---------------------------------------------------
END_TIMER(NormalizeTime);
START_TIMER;
//---------------------------------------------------
// Optimal perplexity
CUML_LOG_DEBUG("Searching for optimal perplexity via bisection search.");
MLCommon::device_buffer<float> P(d_alloc, stream, n * n_neighbors);
TSNE::perplexity_search(distances.data(), P.data(), perplexity,
perplexity_max_iter, perplexity_tol, n, n_neighbors,
handle);
distances.release(stream);
//---------------------------------------------------
END_TIMER(PerplexityTime);
START_TIMER;
//---------------------------------------------------
// Convert data to COO layout
MLCommon::Sparse::COO<float> COO_Matrix(d_alloc, stream);
TSNE::symmetrize_perplexity(P.data(), indices.data(), n, n_neighbors,
early_exaggeration, &COO_Matrix, stream, handle);
P.release(stream);
indices.release(stream);
const int NNZ = COO_Matrix.nnz;
float *VAL = COO_Matrix.vals();
const int *COL = COO_Matrix.cols();
const int *ROW = COO_Matrix.rows();
//---------------------------------------------------
END_TIMER(SymmetrizeTime);
if (barnes_hut) {
TSNE::Barnes_Hut(VAL, COL, ROW, NNZ, handle, Y, n, theta, epssq,
early_exaggeration, exaggeration_iter, min_gain,
pre_learning_rate, post_learning_rate, max_iter,
min_grad_norm, pre_momentum, post_momentum, random_state);
} else {
TSNE::Exact_TSNE(VAL, COL, ROW, NNZ, handle, Y, n, dim, early_exaggeration,
exaggeration_iter, min_gain, pre_learning_rate,
post_learning_rate, max_iter, min_grad_norm, pre_momentum,
post_momentum, random_state, intialize_embeddings);
}
}
} // namespace ML
|
62e3fb092bfd57070589c44683d5a1d217f0519c.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// CUDA-aware Open MPI
// Now, the Open MPI library will automatically detect that the pointer being passed in is a CUDA device memory pointer and do the right thing. This is referred to as CUDA-aware support.
// https://www.open-mpi.org/faq/?category=runcuda
// https://devblogs.nvidia.com/parallelforall/introduction-cuda-aware-mpi/
//
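// Illustrative sketch only (assumes an MPI build with CUDA-aware support enabled):
// device pointers such as frontier_next_g could then be handed to MPI directly, e.g.
//   MPI_Allreduce(MPI_IN_PLACE, frontier_next_g,
//                 global_long_nb / sizeof(int64_t), MPI_INT64_T, MPI_BOR, MPI_COMM_WORLD);
// which would avoid the explicit host<->device frontier copies performed in this file.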
extern "C" {
#include "bottom_up.h"
}
#include <string.h>
#include "oned_csr.h"
#include "constants.h"
#include "bfs.h"
#include "print.h"
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
//#include <helper_cuda.h>
//#include <helper_functions.h> // helper functions for SDK examples
//
// !!! Assume SIZE_MUST_BE_A_POWER_OF_TWO !!!
//
__device__ int rank_g;
__device__ int size_g;
__device__ int lgsize_g;
__device__ int64_t nlocalverts_g;
struct Context {
int rank;
int size;
int lgsize;
int64_t nlocalverts;
};
__device__ void set_context(Context context) {
rank_g = context.rank;
size_g = context.size;
lgsize_g = context.lgsize;
nlocalverts_g = context.nlocalverts;
}
// __device__ int64_t mode_size(int64_t v) {
// return v & ((1 << lgsize_g) - 1);
// }
// __device__ int64_t div_size(int64_t v) {
// return v >> lgsize_g;
// }
// __device__ int64_t mul_size(int64_t v) {
// return v << lgsize_g;
// }
// __device__ int vertex_owner(int64_t v) {
// return mode_size(v);
// }
// __device__ size_t vertex_local(int64_t v) {
// return div_size(v);
// }
// __device__ int64_t vertex_to_global(int r, int64_t i) {
// return mul_size(i) + r;
// }
#define LONG_BITS_G 64
// __device__ void set_local(int64_t v, int64_t *a) { // x / 64 --> x >> 6 ??
// a[vertex_local(v) / LONG_BITS_G] |= (1UL << (vertex_local(v) % LONG_BITS_G));
// }
// __device__ int test_local(int64_t v, int64_t *a) {
// return 0 != (a[vertex_local(v) / LONG_BITS_G] & (1UL << (vertex_local(v) % LONG_BITS_G)));
// }
// __device__ void set_global(int64_t v, int64_t *a) { // x / 64 --> x >> 6 ??
// a[v / LONG_BITS_G] |= (1UL << (v % LONG_BITS_G));
// }
// __device__ void set_global_atomic(int64_t v, int64_t *a) {
// atomicOr((unsigned long long int*)(&a[v / LONG_BITS_G]), 1UL << (v % LONG_BITS_G));
// }
// __device__ int test_global(int64_t v, int64_t *a) {
// return 0 != (a[v / LONG_BITS_G] & (1UL << (v % LONG_BITS_G)));
// }
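// Vertex distribution helpers (size is assumed to be a power of two): the owner rank of
// global vertex v is v mod size, its local index is v / size, and the SET_*/TEST_* macros
// address one bit per vertex in an int64_t bitmap (LONG_BITS_G bits per word).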
#define MOD_SIZE_G(v) ((v) & ((1 << lgsize_g) - 1))
#define DIV_SIZE_G(v) ((v) >> lgsize_g)
#define MUL_SIZE_G(x) ((x) << lgsize_g)
#define VERTEX_OWNER_G(v) ((int)(MOD_SIZE_G(v)))
#define VERTEX_LOCAL_G(v) ((size_t)(DIV_SIZE_G(v)))
#define VERTEX_TO_GLOBAL_G(r, i) ((int64_t)(MUL_SIZE_G((uint64_t)i) + (int)(r)))
// #define LONG_BITS_G (sizeof(unsigned long) * CHAR_BIT)
#define LONG_BITS_G 64
#define SET_LOCAL_G(v, a) do {(a)[VERTEX_LOCAL_G((v)) / LONG_BITS_G] |= (1UL << (VERTEX_LOCAL_G((v)) % LONG_BITS_G));} while (0)
#define TEST_LOCA_G(v, a) (((a)[VERTEX_LOCAL_G((v)) / LONG_BITS_G] & (1UL << (VERTEX_LOCAL_G((v)) % LONG_BITS_G))) != 0)
#define SET_GLOBAL_G(v, a) do {(a)[(v) / LONG_BITS_G] |= (1UL << ((v) % LONG_BITS_G));} while (0)
#define SET_GLOBAL_ATOMIC_G(v, a) do {atomicOr((unsigned long long int*)(&(a)[(v) / LONG_BITS_G]), 1UL << ((v) % LONG_BITS_G));} while (0)
#define TEST_GLOBAL_G(v, a) (((a)[(v) / LONG_BITS_G] & (1UL << ((v) % LONG_BITS_G))) != 0)
extern oned_csr_graph g;
extern int64_t *pred;
extern int64_t *frontier;
extern int64_t *frontier_next;
#define BLOCK_X 64
// do_nothing kernel for the bottom-up step:
// one thread handles one local vertex and scans all of its neighbours,
// looking for a parent that is already in the current frontier
void dim_do_nothing(dim3* dimGrid, dim3 *dimBlock) {
*dimGrid = dim3((g.nlocalverts + BLOCK_X - 1) / BLOCK_X, 1, 1); // number of blocks
*dimBlock = dim3(BLOCK_X, 1, 1); // number of threads per block
}
__global__ void do_nothing(
int64_t *rowstarts_g,
int64_t *column_g,
int64_t *frontier_g,
int64_t *frontier_next_g,
int64_t *pred_g,
Context context) {
set_context(context);
const int block_base = blockIdx.x * blockDim.x;
const int64_t i = block_base + threadIdx.x;
__syncthreads();
if (i >= nlocalverts_g)
return ;
if (pred_g[i] == -1) {
int j;
for (j = (int) rowstarts_g[i]; j < rowstarts_g[i + 1]; j++) {
int64_t parent_global = column_g[j];
if (TEST_GLOBAL_G(parent_global, frontier_g)) {
pred_g[i] = parent_global;
SET_GLOBAL_ATOMIC_G(VERTEX_TO_GLOBAL_G(rank_g, i), frontier_next_g);
break;
}
}
}
}
int64_t *rowstarts_g;
int size_rowstarts;
int64_t *column_g;
int size_column;
int64_t *pred_g;
int size_pred_g;
int64_t *frontier_g;
int size_frontier_g;
int64_t *frontier_next_g;
int size_frontier_next_g;
// transfer graph to gpu global memory
// should perform only once
void init_bottom_up_gpu() {
size_rowstarts = (g.nlocalverts + 1) * sizeof(int64_t);
size_column = g.rowstarts[g.nlocalverts] * sizeof(int64_t);
hipMalloc((void **)&rowstarts_g, size_rowstarts);
hipMalloc((void **)&column_g, size_column);
hipMemcpy(rowstarts_g, g.rowstarts, size_rowstarts, hipMemcpyHostToDevice);
hipMemcpy(column_g, g.column, size_column, hipMemcpyHostToDevice);
// here we assume pred always resides on the GPU
// from beginning to end;
// only when everything is done
// is pred transferred back to the CPU
size_pred_g = g.nlocalverts * sizeof(int64_t);
hipMalloc((void **)&pred_g, size_pred_g);
// hipMemcpy(pred_g, pred, size_pred_g, hipMemcpyHostToDevice);
size_frontier_g = global_long_nb;
hipMalloc((void **)&frontier_g, size_frontier_g);
size_frontier_next_g = global_long_nb;
hipMalloc((void **)&frontier_next_g, size_frontier_next_g);
}
void pred_to_gpu() {
hipMemcpy(pred_g, pred, size_pred_g, hipMemcpyHostToDevice);
}
void pred_from_gpu() {
hipMemcpy(pred, pred_g, size_pred_g, hipMemcpyDeviceToHost);
}
void end_bottom_up_gpu() {
hipFree(rowstarts_g);
hipFree(column_g);
hipFree(pred_g);
hipFree(frontier_g);
hipFree(frontier_next_g);
}
void one_step_bottom_up_gpu() {
// transfer current frontier to gpu
hipMemcpy(frontier_g, frontier, size_frontier_g, hipMemcpyHostToDevice);
hipMemset(frontier_next_g, 0, size_frontier_next_g);
// get suitable dim
dim3 dimGrid;
dim3 dimBlock;
dim_do_nothing(&dimGrid, &dimBlock);
// launch gpu kernel
// it should compute frontier_next_g
Context context = { rank, size, lgsize, g.nlocalverts };
hipLaunchKernelGGL(( do_nothing), dim3(dimGrid), dim3(dimBlock), 0, 0, rowstarts_g, column_g, frontier_g, frontier_next_g, pred_g, context);
hipMemcpy(frontier_next, frontier_next_g, size_frontier_next_g, hipMemcpyDeviceToHost);
}
|
62e3fb092bfd57070589c44683d5a1d217f0519c.cu
|
//
// CUDA-aware Open MPI
// Now, the Open MPI library will automatically detect that the pointer being passed in is a CUDA device memory pointer and do the right thing. This is referred to as CUDA-aware support.
// https://www.open-mpi.org/faq/?category=runcuda
// https://devblogs.nvidia.com/parallelforall/introduction-cuda-aware-mpi/
//
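// Illustrative sketch only (assumes an MPI build with CUDA-aware support enabled):
// device pointers such as frontier_next_g could then be handed to MPI directly, e.g.
//   MPI_Allreduce(MPI_IN_PLACE, frontier_next_g,
//                 global_long_nb / sizeof(int64_t), MPI_INT64_T, MPI_BOR, MPI_COMM_WORLD);
// which would avoid the explicit host<->device frontier copies performed in this file.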
extern "C" {
#include "bottom_up.h"
}
#include <string.h>
#include "oned_csr.h"
#include "constants.h"
#include "bfs.h"
#include "print.h"
// includes CUDA
#include <cuda_runtime.h>
// includes, project
//#include <helper_cuda.h>
//#include <helper_functions.h> // helper functions for SDK examples
//
// !!! Assume SIZE_MUST_BE_A_POWER_OF_TWO !!!
//
__device__ int rank_g;
__device__ int size_g;
__device__ int lgsize_g;
__device__ int64_t nlocalverts_g;
struct Context {
int rank;
int size;
int lgsize;
int64_t nlocalverts;
};
__device__ void set_context(Context context) {
rank_g = context.rank;
size_g = context.size;
lgsize_g = context.lgsize;
nlocalverts_g = context.nlocalverts;
}
// __device__ int64_t mode_size(int64_t v) {
// return v & ((1 << lgsize_g) - 1);
// }
// __device__ int64_t div_size(int64_t v) {
// return v >> lgsize_g;
// }
// __device__ int64_t mul_size(int64_t v) {
// return v << lgsize_g;
// }
// __device__ int vertex_owner(int64_t v) {
// return mode_size(v);
// }
// __device__ size_t vertex_local(int64_t v) {
// return div_size(v);
// }
// __device__ int64_t vertex_to_global(int r, int64_t i) {
// return mul_size(i) + r;
// }
#define LONG_BITS_G 64
// __device__ void set_local(int64_t v, int64_t *a) { // x / 64 --> x >> 6 ??
// a[vertex_local(v) / LONG_BITS_G] |= (1UL << (vertex_local(v) % LONG_BITS_G));
// }
// __device__ int test_local(int64_t v, int64_t *a) {
// return 0 != (a[vertex_local(v) / LONG_BITS_G] & (1UL << (vertex_local(v) % LONG_BITS_G)));
// }
// __device__ void set_global(int64_t v, int64_t *a) { // x / 64 --> x >> 6 ??
// a[v / LONG_BITS_G] |= (1UL << (v % LONG_BITS_G));
// }
// __device__ void set_global_atomic(int64_t v, int64_t *a) {
// atomicOr((unsigned long long int*)(&a[v / LONG_BITS_G]), 1UL << (v % LONG_BITS_G));
// }
// __device__ int test_global(int64_t v, int64_t *a) {
// return 0 != (a[v / LONG_BITS_G] & (1UL << (v % LONG_BITS_G)));
// }
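// Vertex distribution helpers (size is assumed to be a power of two): the owner rank of
// global vertex v is v mod size, its local index is v / size, and the SET_*/TEST_* macros
// address one bit per vertex in an int64_t bitmap (LONG_BITS_G bits per word).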
#define MOD_SIZE_G(v) ((v) & ((1 << lgsize_g) - 1))
#define DIV_SIZE_G(v) ((v) >> lgsize_g)
#define MUL_SIZE_G(x) ((x) << lgsize_g)
#define VERTEX_OWNER_G(v) ((int)(MOD_SIZE_G(v)))
#define VERTEX_LOCAL_G(v) ((size_t)(DIV_SIZE_G(v)))
#define VERTEX_TO_GLOBAL_G(r, i) ((int64_t)(MUL_SIZE_G((uint64_t)i) + (int)(r)))
// #define LONG_BITS_G (sizeof(unsigned long) * CHAR_BIT)
#define LONG_BITS_G 64
#define SET_LOCAL_G(v, a) do {(a)[VERTEX_LOCAL_G((v)) / LONG_BITS_G] |= (1UL << (VERTEX_LOCAL_G((v)) % LONG_BITS_G));} while (0)
#define TEST_LOCA_G(v, a) (((a)[VERTEX_LOCAL_G((v)) / LONG_BITS_G] & (1UL << (VERTEX_LOCAL_G((v)) % LONG_BITS_G))) != 0)
#define SET_GLOBAL_G(v, a) do {(a)[(v) / LONG_BITS_G] |= (1UL << ((v) % LONG_BITS_G));} while (0)
#define SET_GLOBAL_ATOMIC_G(v, a) do {atomicOr((unsigned long long int*)(&(a)[(v) / LONG_BITS_G]), 1UL << ((v) % LONG_BITS_G));} while (0)
#define TEST_GLOBAL_G(v, a) (((a)[(v) / LONG_BITS_G] & (1UL << ((v) % LONG_BITS_G))) != 0)
extern oned_csr_graph g;
extern int64_t *pred;
extern int64_t *frontier;
extern int64_t *frontier_next;
#define BLOCK_X 64
// do_nothing kernel for the bottom-up step:
// one thread handles one local vertex and scans all of its neighbours,
// looking for a parent that is already in the current frontier
void dim_do_nothing(dim3* dimGrid, dim3 *dimBlock) {
*dimGrid = dim3((g.nlocalverts + BLOCK_X - 1) / BLOCK_X, 1, 1); // number of blocks
*dimBlock = dim3(BLOCK_X, 1, 1); // number of threads per block
}
__global__ void do_nothing(
int64_t *rowstarts_g,
int64_t *column_g,
int64_t *frontier_g,
int64_t *frontier_next_g,
int64_t *pred_g,
Context context) {
set_context(context);
const int block_base = blockIdx.x * blockDim.x;
const int64_t i = block_base + threadIdx.x;
__syncthreads();
if (i >= nlocalverts_g)
return ;
if (pred_g[i] == -1) {
int j;
for (j = (int) rowstarts_g[i]; j < rowstarts_g[i + 1]; j++) {
int64_t parent_global = column_g[j];
if (TEST_GLOBAL_G(parent_global, frontier_g)) {
pred_g[i] = parent_global;
SET_GLOBAL_ATOMIC_G(VERTEX_TO_GLOBAL_G(rank_g, i), frontier_next_g);
break;
}
}
}
}
int64_t *rowstarts_g;
int size_rowstarts;
int64_t *column_g;
int size_column;
int64_t *pred_g;
int size_pred_g;
int64_t *frontier_g;
int size_frontier_g;
int64_t *frontier_next_g;
int size_frontier_next_g;
// transfer graph to gpu global memory
// should perform only once
void init_bottom_up_gpu() {
size_rowstarts = (g.nlocalverts + 1) * sizeof(int64_t);
size_column = g.rowstarts[g.nlocalverts] * sizeof(int64_t);
cudaMalloc((void **)&rowstarts_g, size_rowstarts);
cudaMalloc((void **)&column_g, size_column);
cudaMemcpy(rowstarts_g, g.rowstarts, size_rowstarts, cudaMemcpyHostToDevice);
cudaMemcpy(column_g, g.column, size_column, cudaMemcpyHostToDevice);
// here we assume pred always resides on the GPU
// from beginning to end;
// only when everything is done
// is pred transferred back to the CPU
size_pred_g = g.nlocalverts * sizeof(int64_t);
cudaMalloc((void **)&pred_g, size_pred_g);
// cudaMemcpy(pred_g, pred, size_pred_g, cudaMemcpyHostToDevice);
size_frontier_g = global_long_nb;
cudaMalloc((void **)&frontier_g, size_frontier_g);
size_frontier_next_g = global_long_nb;
cudaMalloc((void **)&frontier_next_g, size_frontier_next_g);
}
void pred_to_gpu() {
cudaMemcpy(pred_g, pred, size_pred_g, cudaMemcpyHostToDevice);
}
void pred_from_gpu() {
cudaMemcpy(pred, pred_g, size_pred_g, cudaMemcpyDeviceToHost);
}
void end_bottom_up_gpu() {
cudaFree(rowstarts_g);
cudaFree(column_g);
cudaFree(pred_g);
cudaFree(frontier_g);
cudaFree(frontier_next_g);
}
void one_step_bottom_up_gpu() {
// transfer current frontier to gpu
cudaMemcpy(frontier_g, frontier, size_frontier_g, cudaMemcpyHostToDevice);
cudaMemset(frontier_next_g, 0, size_frontier_next_g);
// get suitable dim
dim3 dimGrid;
dim3 dimBlock;
dim_do_nothing(&dimGrid, &dimBlock);
// launch gpu kernel
// it should compute frontier_next_g
Context context = { rank, size, lgsize, g.nlocalverts };
do_nothing<<<dimGrid, dimBlock>>>(rowstarts_g, column_g, frontier_g, frontier_next_g, pred_g, context);
cudaMemcpy(frontier_next, frontier_next_g, size_frontier_next_g, cudaMemcpyDeviceToHost);
}
|
e1fc404c244931626dda5de1b44942e7ef44edc1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Compute_psi_phi_Kernel(float* psi, float* phi, const float* gAbsIx, const float* gAbsIy, const float* gIx, const float* gIy, int nPixels, float norm_for_contrast_num, float norm_for_contrast_denom, float eps)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int x = bx*blockDim.x + tx;
if (x >= nPixels)
return;
float psi_num = 0, psi_denom = 0;
float phi_num = 0, phi_denom = 0;
if (norm_for_contrast_num == 0)
{
psi_num = 1;
phi_num = 1;
}
else if (norm_for_contrast_num == 1)
{
psi_num = gAbsIx[x];
phi_num = gAbsIy[x];
}
else if (norm_for_contrast_num == 2)
{
psi_num = gAbsIx[x] * gAbsIx[x];
phi_num = gAbsIy[x] * gAbsIy[x];
}
else
{
psi_num = pow(gAbsIx[x], norm_for_contrast_num);
phi_num = pow(gAbsIy[x], norm_for_contrast_num);
}
if (norm_for_contrast_denom == 0)
{
psi_denom = 1;
phi_denom = 1;
}
else if (norm_for_contrast_denom == 1)
{
psi_denom = fabs(gIx[x]) + eps;
phi_denom = fabs(gIy[x]) + eps;
}
else if (norm_for_contrast_denom == 2)
{
psi_denom = gIx[x] * gIx[x] + eps;
phi_denom = gIy[x] * gIy[x] + eps;
}
else
{
psi_denom = pow(fabs(gIx[x]), norm_for_contrast_denom) + eps;
phi_denom = pow(fabs(gIy[x]), norm_for_contrast_denom) + eps;
}
psi[x] = psi_num / psi_denom;
phi[x] = phi_num / phi_denom;
}
|
e1fc404c244931626dda5de1b44942e7ef44edc1.cu
|
#include "includes.h"
__global__ void Compute_psi_phi_Kernel(float* psi, float* phi, const float* gAbsIx, const float* gAbsIy, const float* gIx, const float* gIy, int nPixels, float norm_for_contrast_num, float norm_for_contrast_denom, float eps)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int x = bx*blockDim.x + tx;
if (x >= nPixels)
return;
float psi_num = 0, psi_denom = 0;
float phi_num = 0, phi_denom = 0;
if (norm_for_contrast_num == 0)
{
psi_num = 1;
phi_num = 1;
}
else if (norm_for_contrast_num == 1)
{
psi_num = gAbsIx[x];
phi_num = gAbsIy[x];
}
else if (norm_for_contrast_num == 2)
{
psi_num = gAbsIx[x] * gAbsIx[x];
phi_num = gAbsIy[x] * gAbsIy[x];
}
else
{
psi_num = pow(gAbsIx[x], norm_for_contrast_num);
phi_num = pow(gAbsIy[x], norm_for_contrast_num);
}
if (norm_for_contrast_denom == 0)
{
psi_denom = 1;
phi_denom = 1;
}
else if (norm_for_contrast_denom == 1)
{
psi_denom = fabs(gIx[x]) + eps;
phi_denom = fabs(gIy[x]) + eps;
}
else if (norm_for_contrast_denom == 2)
{
psi_denom = gIx[x] * gIx[x] + eps;
phi_denom = gIy[x] * gIy[x] + eps;
}
else
{
psi_denom = pow(fabs(gIx[x]), norm_for_contrast_denom) + eps;
phi_denom = pow(fabs(gIy[x]), norm_for_contrast_denom) + eps;
}
psi[x] = psi_num / psi_denom;
phi[x] = phi_num / phi_denom;
}
|
13c37a30a4170684e1f5a86512c219b878e9010b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void t_sum(float *a, float *out, int n_elements)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i == 0) out[0] = 0;
__syncthreads();
if (i < n_elements)
atomicAdd(out, a[i]);
}
|
13c37a30a4170684e1f5a86512c219b878e9010b.cu
|
#include "includes.h"
__global__ void t_sum(float *a, float *out, int n_elements)
{
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i == 0) out[0] = 0;
__syncthreads();
if (i < n_elements)
atomicAdd(out, a[i]);
}
|
5c403481e8eceb611dfd7b272e29fa96fbb0ea65.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_spp.h"
#include "hip/hip_fp16.h"
namespace anakin {
namespace saber {
#if 0
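// Copies an n x w contiguous input block into an output whose rows have stride n_stride,
// i.e. scatters each pooled feature map row-by-row into its slot of the concatenated SPP output.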
template <typename Dtype>
__global__ void ker_concat_fwd(Dtype* out_data, const Dtype* in_data,
const int n,
const int w,
const int n_stride, const int nthreads) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n_id = index / w;
const int w_id = index % w;
const int out_index = n_id * n_stride + w_id;
out_data[out_index] = in_data[index];
}
}
template <DataType OpDtype,
DataType inDtype,
DataType outDtype,
typename LayOutType_op,
typename LayOutType_in,
typename LayOutType_out>
SaberStatus SaberSpp<NV, OpDtype, inDtype, outDtype,\
LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\
const std::vector<DataTensor_in *>& inputs, \
std::vector<DataTensor_out *>& outputs, \
SPPParam<OpTensor>& param) {
const InDataType* in_data = inputs[0]->data();
OutDataType* out_data = outputs[0]->mutable_data();
hipStream_t cuda_stream = this->_ctx->get_compute_stream();
int count = outputs[0]->valid_size();
int out_n = outputs[0]->num();
int out_c = outputs[0]->channel();
int out_h = outputs[0]->height();
int out_w = outputs[0]->width();
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) {
std::vector<OpTensor*> pool_outputs;
pool_outputs.resize(1);
for (int i = 0; i < param.pyramid_height; i++) {
pool_outputs[0] = _pooling_output[i];
(*_pooling[i])(inputs, pool_outputs, _pooling_param[i], this->_ctx);
int valid_size = pool_outputs[0]->valid_size();
int offset = (pow(4, i) - 1) / 3;
hipLaunchKernelGGL(( ker_concat_fwd<InDataType>), dim3(CUDA_GET_BLOCKS(valid_size)),dim3(CUDA_NUM_THREADS), 0, cuda_stream,
out_data + offset,
pool_outputs[0]->data(),
pool_outputs[0]->num() * pool_outputs[0]->channel(),
pool_outputs[0]->height() * pool_outputs[0]->width(),
outputs[0]->width(),
valid_size);
}
}
return SaberSuccess;
}
#endif
} //namespace saber
} //namespace anakin
|
5c403481e8eceb611dfd7b272e29fa96fbb0ea65.cu
|
#include "saber/funcs/impl/cuda/saber_spp.h"
#include "cuda_fp16.h"
namespace anakin {
namespace saber {
#if 0
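// Copies an n x w contiguous input block into an output whose rows have stride n_stride,
// i.e. scatters each pooled feature map row-by-row into its slot of the concatenated SPP output.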
template <typename Dtype>
__global__ void ker_concat_fwd(Dtype* out_data, const Dtype* in_data,
const int n,
const int w,
const int n_stride, const int nthreads) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n_id = index / w;
const int w_id = index % w;
const int out_index = n_id * n_stride + w_id;
out_data[out_index] = in_data[index];
}
}
template <DataType OpDtype,
DataType inDtype,
DataType outDtype,
typename LayOutType_op,
typename LayOutType_in,
typename LayOutType_out>
SaberStatus SaberSpp<NV, OpDtype, inDtype, outDtype,\
LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\
const std::vector<DataTensor_in *>& inputs, \
std::vector<DataTensor_out *>& outputs, \
SPPParam<OpTensor>& param) {
const InDataType* in_data = inputs[0]->data();
OutDataType* out_data = outputs[0]->mutable_data();
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
int count = outputs[0]->valid_size();
int out_n = outputs[0]->num();
int out_c = outputs[0]->channel();
int out_h = outputs[0]->height();
int out_w = outputs[0]->width();
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) {
std::vector<OpTensor*> pool_outputs;
pool_outputs.resize(1);
for (int i = 0; i < param.pyramid_height; i++) {
pool_outputs[0] = _pooling_output[i];
(*_pooling[i])(inputs, pool_outputs, _pooling_param[i], this->_ctx);
int valid_size = pool_outputs[0]->valid_size();
int offset = (pow(4, i) - 1) / 3;
ker_concat_fwd<InDataType><<<CUDA_GET_BLOCKS(valid_size),CUDA_NUM_THREADS, 0, cuda_stream>>>(
out_data + offset,
pool_outputs[0]->data(),
pool_outputs[0]->num() * pool_outputs[0]->channel(),
pool_outputs[0]->height() * pool_outputs[0]->width(),
outputs[0]->width(),
valid_size);
}
}
return SaberSuccess;
}
#endif
} //namespace saber
} //namespace anakin
|
787fd8c91bd7ee1f2a91dd3b18455ba8eeb0431e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <benchmark/benchmark.h>
#include <bm_config.hpp>
#include <matazure/tensor>
using namespace matazure;
#ifdef USE_ROCM
template <typename _ValueType>
__global__ void gold_tensor_rank1_mul_kenel(_ValueType *p_dst, _ValueType *p1, _ValueType *p2, int_t count){
for (int_t i = threadIdx.x + blockIdx.x * blockDim.x; i < count; i += blockDim.x * gridDim.x) {
p_dst[i] = p1[i] * p2[i];
}
}
template <typename _ValueType>
void bm_gold_cu_tensor_rank1_mul(benchmark::State& state) {
cuda::tensor<_ValueType, 1> ts0(state.range(0));
cuda::tensor<_ValueType, 1> ts1(state.range(0));
fill(ts0, zero<_ValueType>::value());
fill(ts1, zero<_ValueType>::value());
while (state.KeepRunning()) {
cuda::tensor<_ValueType, 1> ts_re(ts0.shape());
cuda::execution_policy policy;
cuda::configure_grid(policy, gold_tensor_rank1_mul_kenel<_ValueType>);
hipLaunchKernelGGL(( gold_tensor_rank1_mul_kenel), dim3(policy.grid_size()),
dim3( policy.block_size()),
policy.shared_mem_bytes(),
policy.stream() , ts_re.data(), ts0.data(), ts1.data(), ts_re.size());
hip::device_synchronize();
benchmark::ClobberMemory();
}
auto bytes_size = static_cast<size_t>(ts0.size()) * sizeof(_ValueType);
state.SetBytesProcessed(state.iterations() * bytes_size * 3);
state.SetItemsProcessed(state.iterations() * bytes_size);
}
#define BM_GOLD_CU_TENSOR_RANK1_MUL(ValueType) \
auto bm_gold_cu_tensor_##ValueType##_rank1_mul = bm_gold_cu_tensor_rank1_mul<ValueType>; \
BENCHMARK(bm_gold_cu_tensor_##ValueType##_rank1_mul)->RangeMultiplier(bm_config::range_multiplier<ValueType, 1, device_tag>())->Range(bm_config::min_shape<ValueType, 1, device_tag>(), bm_config::max_shape<ValueType, 1, device_tag>())->UseRealTime();
BM_GOLD_CU_TENSOR_RANK1_MUL(byte)
BM_GOLD_CU_TENSOR_RANK1_MUL(int16_t)
BM_GOLD_CU_TENSOR_RANK1_MUL(int32_t)
BM_GOLD_CU_TENSOR_RANK1_MUL(int64_t)
BM_GOLD_CU_TENSOR_RANK1_MUL(float)
BM_GOLD_CU_TENSOR_RANK1_MUL(double)
BM_GOLD_CU_TENSOR_RANK1_MUL(point3f)
BM_GOLD_CU_TENSOR_RANK1_MUL(point4f)
BM_GOLD_CU_TENSOR_RANK1_MUL(hete_float32x4_t)
#endif
#ifdef USE_HOST
template <typename _ValueType>
void bm_gold_host_tensor_rank1_mul(benchmark::State &state) {
tensor<_ValueType, 1> ts0(state.range(0));
tensor<_ValueType, 1> ts1(state.range(0));
fill(ts0, zero<_ValueType>::value());
fill(ts1, zero<_ValueType>::value());
while (state.KeepRunning()) {
tensor<_ValueType, 1> ts_re(ts0.shape());
for (int_t i = 0, size = ts_re.size(); i < size; i += 1024) {
auto b = ts0[i + 1024];
auto c = ts1[i + 1024];
for (int_t j = 0; j < 1024; ++j) {
ts_re[i+j] = ts0[i+j] * ts1[i+j];
}
}
benchmark::ClobberMemory();
}
size_t element_size = ts0.size();
auto bytes_size = element_size * sizeof(decltype(ts0[0]));
state.SetBytesProcessed(state.iterations() * bytes_size * 3);
state.SetItemsProcessed(state.iterations() * element_size);
}
#define BM_GOLD_HOST_TENSOR_RANK1_MUL(ValueType) \
auto bm_gold_host_tensor_##ValueType##_rank1_mul = bm_gold_host_tensor_rank1_mul<ValueType>; \
BENCHMARK(bm_gold_host_tensor_##ValueType##_rank1_mul)->RangeMultiplier(bm_config::range_multiplier<ValueType, 1, host_tag>())->Range(bm_config::min_shape<ValueType, 1, host_tag>(), bm_config::max_shape<ValueType, 1, host_tag>())->UseRealTime();
BM_GOLD_HOST_TENSOR_RANK1_MUL(byte)
BM_GOLD_HOST_TENSOR_RANK1_MUL(int16_t)
BM_GOLD_HOST_TENSOR_RANK1_MUL(int32_t)
BM_GOLD_HOST_TENSOR_RANK1_MUL(int64_t)
BM_GOLD_HOST_TENSOR_RANK1_MUL(float)
BM_GOLD_HOST_TENSOR_RANK1_MUL(double)
BM_GOLD_HOST_TENSOR_RANK1_MUL(point3f)
BM_GOLD_HOST_TENSOR_RANK1_MUL(point4f)
BM_GOLD_HOST_TENSOR_RANK1_MUL(hete_float32x4_t)
#endif
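// Benchmarks the lazy element-wise expression (ts0 Op ts1) materialized with copy();
// bytes processed is counted as two result-sized buffers per iteration.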
#define BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(OpName, Op) \
template <typename _Tensor> \
void bm_hete_tensor_##OpName(benchmark::State &state) { \
_Tensor ts0(pointi<_Tensor::rank>::all(state.range(0))); \
_Tensor ts1(ts0.shape()); \
fill(ts0, zero<typename _Tensor::value_type>::value()); \
fill(ts1, zero<typename _Tensor::value_type>::value()); \
decltype((ts0 Op ts1).persist()) ts_re(ts0.shape()); \
\
while (state.KeepRunning()) { \
copy(ts0 Op ts1, ts_re); \
HETE_SYNCHRONIZE; \
\
benchmark::ClobberMemory(); \
} \
\
auto bytes_size = static_cast<size_t>(ts_re.size()) * sizeof(decltype(ts_re[0])); \
state.SetBytesProcessed(state.iterations() * bytes_size * 2); \
state.SetItemsProcessed(state.iterations() * static_cast<size_t>(ts0.size())); \
}
//Arithmetic
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(add, +)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(sub, -)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(mul, *)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(div, /)
//Mod
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(mod, %)
//Bit
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(left_shift, <<)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(right_shift, >>)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(bit_or, |)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(bit_and, &)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(bit_xor, ^)
//Logic
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(or , ||)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(and, &&)
//Compare
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(gt, >)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(lt, <)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(ge, >=)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(le, <=)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(equal, ==)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(not_equal, !=)
#define BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, Rank) \
auto bm_hete_tensor_##ValueType##_rank##Rank##_##OpName = bm_hete_tensor_##OpName<HETE_TENSOR<ValueType, Rank>>; \
BENCHMARK(bm_hete_tensor_##ValueType##_rank##Rank##_##OpName)->RangeMultiplier(bm_config::range_multiplier<ValueType, Rank, HETE_TAG>())->Range(bm_config::min_shape<ValueType, Rank, HETE_TAG>(), bm_config::max_shape<ValueType, Rank, HETE_TAG>())->UseRealTime();
#define BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, ValueType) \
BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, 1) \
BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, 2) \
BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, 3) \
BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, 4)
#define BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(OpName) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, byte) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, int16_t) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, int32_t) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, int64_t)
#define BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(OpName) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, float) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, double) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, point3f) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, point4f) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, hete_float32x4_t)
//Arithmetic
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(add)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(sub)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(mul)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(div)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(mod)
//Bit
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(left_shift)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(right_shift)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(bit_or)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(bit_and)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(bit_xor)
//Logic
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(or)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(and)
//Compare
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(gt)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(lt)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(ge)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(le)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(equal)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(not_equal)
//Arithmetic
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(add)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(sub)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(mul)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(div)
//Compare
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(gt)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(lt)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(ge)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(le)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(equal)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(not_equal)
|
787fd8c91bd7ee1f2a91dd3b18455ba8eeb0431e.cu
|
#include <benchmark/benchmark.h>
#include <bm_config.hpp>
#include <matazure/tensor>
using namespace matazure;
#ifdef USE_CUDA
template <typename _ValueType>
__global__ void gold_tensor_rank1_mul_kenel(_ValueType *p_dst, _ValueType *p1, _ValueType *p2, int_t count){
for (int_t i = threadIdx.x + blockIdx.x * blockDim.x; i < count; i += blockDim.x * gridDim.x) {
p_dst[i] = p1[i] * p2[i];
}
}
template <typename _ValueType>
void bm_gold_cu_tensor_rank1_mul(benchmark::State& state) {
cuda::tensor<_ValueType, 1> ts0(state.range(0));
cuda::tensor<_ValueType, 1> ts1(state.range(0));
fill(ts0, zero<_ValueType>::value());
fill(ts1, zero<_ValueType>::value());
while (state.KeepRunning()) {
cuda::tensor<_ValueType, 1> ts_re(ts0.shape());
cuda::execution_policy policy;
cuda::configure_grid(policy, gold_tensor_rank1_mul_kenel<_ValueType>);
gold_tensor_rank1_mul_kenel<<< policy.grid_size(),
policy.block_size(),
policy.shared_mem_bytes(),
policy.stream() >>>(ts_re.data(), ts0.data(), ts1.data(), ts_re.size());
cuda::device_synchronize();
benchmark::ClobberMemory();
}
auto bytes_size = static_cast<size_t>(ts0.size()) * sizeof(_ValueType);
state.SetBytesProcessed(state.iterations() * bytes_size * 3);
    state.SetItemsProcessed(state.iterations() * static_cast<size_t>(ts0.size()));
}
#define BM_GOLD_CU_TENSOR_RANK1_MUL(ValueType) \
auto bm_gold_cu_tensor_##ValueType##_rank1_mul = bm_gold_cu_tensor_rank1_mul<ValueType>; \
BENCHMARK(bm_gold_cu_tensor_##ValueType##_rank1_mul)->RangeMultiplier(bm_config::range_multiplier<ValueType, 1, device_tag>())->Range(bm_config::min_shape<ValueType, 1, device_tag>(), bm_config::max_shape<ValueType, 1, device_tag>())->UseRealTime();
BM_GOLD_CU_TENSOR_RANK1_MUL(byte)
BM_GOLD_CU_TENSOR_RANK1_MUL(int16_t)
BM_GOLD_CU_TENSOR_RANK1_MUL(int32_t)
BM_GOLD_CU_TENSOR_RANK1_MUL(int64_t)
BM_GOLD_CU_TENSOR_RANK1_MUL(float)
BM_GOLD_CU_TENSOR_RANK1_MUL(double)
BM_GOLD_CU_TENSOR_RANK1_MUL(point3f)
BM_GOLD_CU_TENSOR_RANK1_MUL(point4f)
BM_GOLD_CU_TENSOR_RANK1_MUL(hete_float32x4_t)
#endif
#ifdef USE_HOST
template <typename _ValueType>
void bm_gold_host_tensor_rank1_mul(benchmark::State &state) {
tensor<_ValueType, 1> ts0(state.range(0));
tensor<_ValueType, 1> ts1(state.range(0));
fill(ts0, zero<_ValueType>::value());
fill(ts1, zero<_ValueType>::value());
while (state.KeepRunning()) {
tensor<_ValueType, 1> ts_re(ts0.shape());
for (int_t i = 0, size = ts_re.size(); i < size; i += 1024) {
            // touch the next chunk only when it exists, so the final chunk does not read past the end
            auto b = ts0[i + 1024 < size ? i + 1024 : i];
            auto c = ts1[i + 1024 < size ? i + 1024 : i];
for (int_t j = 0; j < 1024; ++j) {
ts_re[i+j] = ts0[i+j] * ts1[i+j];
}
}
benchmark::ClobberMemory();
}
size_t element_size = ts0.size();
auto bytes_size = element_size * sizeof(decltype(ts0[0]));
state.SetBytesProcessed(state.iterations() * bytes_size * 3);
state.SetItemsProcessed(state.iterations() * element_size);
}
#define BM_GOLD_HOST_TENSOR_RANK1_MUL(ValueType) \
auto bm_gold_host_tensor_##ValueType##_rank1_mul = bm_gold_host_tensor_rank1_mul<ValueType>; \
BENCHMARK(bm_gold_host_tensor_##ValueType##_rank1_mul)->RangeMultiplier(bm_config::range_multiplier<ValueType, 1, host_tag>())->Range(bm_config::min_shape<ValueType, 1, host_tag>(), bm_config::max_shape<ValueType, 1, host_tag>())->UseRealTime();
BM_GOLD_HOST_TENSOR_RANK1_MUL(byte)
BM_GOLD_HOST_TENSOR_RANK1_MUL(int16_t)
BM_GOLD_HOST_TENSOR_RANK1_MUL(int32_t)
BM_GOLD_HOST_TENSOR_RANK1_MUL(int64_t)
BM_GOLD_HOST_TENSOR_RANK1_MUL(float)
BM_GOLD_HOST_TENSOR_RANK1_MUL(double)
BM_GOLD_HOST_TENSOR_RANK1_MUL(point3f)
BM_GOLD_HOST_TENSOR_RANK1_MUL(point4f)
BM_GOLD_HOST_TENSOR_RANK1_MUL(hete_float32x4_t)
#endif
#define BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(OpName, Op) \
template <typename _Tensor> \
void bm_hete_tensor_##OpName(benchmark::State &state) { \
_Tensor ts0(pointi<_Tensor::rank>::all(state.range(0))); \
_Tensor ts1(ts0.shape()); \
fill(ts0, zero<typename _Tensor::value_type>::value()); \
fill(ts1, zero<typename _Tensor::value_type>::value()); \
decltype((ts0 Op ts1).persist()) ts_re(ts0.shape()); \
\
while (state.KeepRunning()) { \
copy(ts0 Op ts1, ts_re); \
HETE_SYNCHRONIZE; \
\
benchmark::ClobberMemory(); \
} \
\
auto bytes_size = static_cast<size_t>(ts_re.size()) * sizeof(decltype(ts_re[0])); \
state.SetBytesProcessed(state.iterations() * bytes_size * 2); \
state.SetItemsProcessed(state.iterations() * static_cast<size_t>(ts0.size())); \
}
//Arithmetic
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(add, +)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(sub, -)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(mul, *)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(div, /)
//Mod
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(mod, %)
//Bit
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(left_shift, <<)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(right_shift, >>)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(bit_or, |)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(bit_and, &)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(bit_xor, ^)
//Logic
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(or , ||)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(and, &&)
//Compare
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(gt, >)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(lt, <)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(ge, >=)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(le, <=)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(equal, ==)
BM_HETE_TENSOR_BINARY_OPERATOR_FUNC(not_equal, !=)
#define BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, Rank) \
auto bm_hete_tensor_##ValueType##_rank##Rank##_##OpName = bm_hete_tensor_##OpName<HETE_TENSOR<ValueType, Rank>>; \
BENCHMARK(bm_hete_tensor_##ValueType##_rank##Rank##_##OpName)->RangeMultiplier(bm_config::range_multiplier<ValueType, Rank, HETE_TAG>())->Range(bm_config::min_shape<ValueType, Rank, HETE_TAG>(), bm_config::max_shape<ValueType, Rank, HETE_TAG>())->UseRealTime();
#define BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, ValueType) \
BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, 1) \
BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, 2) \
BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, 3) \
BM_HETE_TENSOR_BINARY_OPERATOR(OpName, ValueType, 4)
#define BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(OpName) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, byte) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, int16_t) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, int32_t) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, int64_t)
#define BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(OpName) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, float) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, double) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, point3f) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, point4f) \
BM_HETE_TENSOR_RANK1234_BINARY_OPERATOR(OpName, hete_float32x4_t)
//Arithmetic
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(add)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(sub)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(mul)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(div)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(mod)
//Bit
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(left_shift)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(right_shift)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(bit_or)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(bit_and)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(bit_xor)
//Logic
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(or)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(and)
//Compare
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(gt)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(lt)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(ge)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(le)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(equal)
BM_HETE_TENSOR_INTEGRAL_TYPES_RANK1234_BINARY_OPERATOR(not_equal)
//Arithmetic
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(add)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(sub)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(mul)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(div)
//Compare
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(gt)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(lt)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(ge)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(le)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(equal)
BM_HETE_TENSOR_TYPES_RANK1234_BINARY_OPERATOR_FLOATING(not_equal)
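// The macros above stamp out one registered benchmark per (operator, value type, rank)
// combination by binding a function template instantiation to a named pointer and handing
// that name to BENCHMARK(). The following is a minimal standalone sketch of the same
// register-through-a-macro pattern, built separately: it is not part of the matazure
// suite, uses plain std::vector instead of tensor<>, and assumes only Google Benchmark.
#include <benchmark/benchmark.h>
#include <vector>
template <typename ValueType>
void bm_sketch_vector_mul(benchmark::State &state) {
    std::vector<ValueType> a(state.range(0), ValueType(1));
    std::vector<ValueType> b(state.range(0), ValueType(2));
    std::vector<ValueType> re(state.range(0));
    while (state.KeepRunning()) {
        for (size_t i = 0; i < re.size(); ++i) {
            re[i] = a[i] * b[i];
        }
        benchmark::ClobberMemory();
    }
    state.SetBytesProcessed(state.iterations() * re.size() * sizeof(ValueType) * 3);
    state.SetItemsProcessed(state.iterations() * re.size());
}
// same naming trick as BM_HETE_TENSOR_BINARY_OPERATOR: the macro argument becomes part of
// the registered benchmark name.
#define BM_SKETCH_VECTOR_MUL(ValueType) \
    auto bm_sketch_vector_##ValueType##_mul = bm_sketch_vector_mul<ValueType>; \
    BENCHMARK(bm_sketch_vector_##ValueType##_mul)->Range(1 << 10, 1 << 20);
BM_SKETCH_VECTOR_MUL(float)
BM_SKETCH_VECTOR_MUL(double)
BENCHMARK_MAIN();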
|
89b6771a53150111f592b392670f10c168b4b650.hip
|
// !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2015-2019, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <forge.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define USE_FORGE_CUDA_COPY_HELPERS
#include <ComputeCopy.h>
#include <cstdio>
#include <iostream>
const unsigned DIMX = 1000;
const unsigned DIMY = 800;
static const float DX = 0.1f;
static const float FRANGE_START = 0.f;
static const float FRANGE_END = 2 * 3.141592f;
static const size_t DATA_SIZE = (size_t)((FRANGE_END - FRANGE_START) / DX);
hiprandState_t* state;
void kernel(float* dev_out, int functionCode,
float* colors, float* alphas, float* radii);
inline int divup(int a, int b)
{
return (a+b-1)/b;
}
__global__
void setupRandomKernel(hiprandState_t *states, unsigned long long seed)
{
unsigned tid = blockDim.x * blockIdx.x + threadIdx.x;
hiprand_init(seed, tid, 0, &states[tid]);
}
int main(void)
{
FORGE_CUDA_CHECK(hipMalloc((void **)&state, DATA_SIZE*sizeof(hiprandState_t)));
hipLaunchKernelGGL(( setupRandomKernel) , dim3(divup(DATA_SIZE,32)), dim3(32) , 0, 0, state, 314567);
float *cos_out;
float *tan_out;
float *colors_out;
float *alphas_out;
float *radii_out;
FORGE_CUDA_CHECK(hipMalloc((void**)&cos_out, sizeof(float) * DATA_SIZE * 2));
FORGE_CUDA_CHECK(hipMalloc((void**)&tan_out, sizeof(float) * DATA_SIZE * 2));
FORGE_CUDA_CHECK(hipMalloc((void**)&colors_out, sizeof(float) * DATA_SIZE * 3));
FORGE_CUDA_CHECK(hipMalloc((void**)&alphas_out, sizeof(float) * DATA_SIZE));
FORGE_CUDA_CHECK(hipMalloc((void**)&radii_out, sizeof(float) * DATA_SIZE));
/*
* First Forge call should be a window creation call
* so that necessary OpenGL context is created for any
* other forge::* object to be created successfully
*/
forge::Window wnd(DIMX, DIMY, "Bubble chart with Transparency Demo");
wnd.makeCurrent();
forge::Chart chart(FG_CHART_2D);
chart.setAxesLimits(FRANGE_START, FRANGE_END, -1.0f, 1.0f);
/* Create several plot objects which creates the necessary
* vertex buffer objects to hold the different plot types
*/
forge::Plot plt1 = chart.plot(DATA_SIZE, forge::f32, FG_PLOT_LINE, FG_MARKER_TRIANGLE);
forge::Plot plt2 = chart.plot(DATA_SIZE, forge::f32, FG_PLOT_LINE, FG_MARKER_CIRCLE);
/* Set plot colors */
plt1.setColor(FG_RED);
plt2.setColor(FG_GREEN); //use a forge predefined color
/* Set plot legends */
plt1.setLegend("Cosine");
plt2.setLegend("Tangent");
/* set plot global marker size */
plt1.setMarkerSize(20);
/* copy your data into the opengl buffer object exposed by
* forge::Plot class and then proceed to rendering.
* To help the users with copying the data from compute
* memory to display memory, Forge provides copy headers
* along with the library to help with this task
*/
GfxHandle* handles[5];
// create GL-CUDA interop buffers
createGLBuffer(&handles[0], plt1.vertices(), FORGE_VERTEX_BUFFER);
createGLBuffer(&handles[1], plt2.vertices(), FORGE_VERTEX_BUFFER);
createGLBuffer(&handles[2], plt2.colors(), FORGE_VERTEX_BUFFER);
createGLBuffer(&handles[3], plt2.alphas(), FORGE_VERTEX_BUFFER);
createGLBuffer(&handles[4], plt2.radii(), FORGE_VERTEX_BUFFER);
kernel(cos_out, 0, NULL, NULL, NULL);
kernel(tan_out, 1, colors_out, alphas_out, radii_out);
// copy the data from compute buffer to graphics buffer
copyToGLBuffer(handles[0], (ComputeResourceHandle)cos_out, plt1.verticesSize());
copyToGLBuffer(handles[1], (ComputeResourceHandle)tan_out, plt2.verticesSize());
/* update color value for tan graph */
copyToGLBuffer(handles[2], (ComputeResourceHandle)colors_out, plt2.colorsSize());
/* update alpha values for tan graph */
copyToGLBuffer(handles[3], (ComputeResourceHandle)alphas_out, plt2.alphasSize());
/* update marker sizes for tan graph markers */
copyToGLBuffer(handles[4], (ComputeResourceHandle)radii_out, plt2.radiiSize());
do {
wnd.draw(chart);
} while(!wnd.close());
// destroy GL-CUDA Interop buffer
releaseGLBuffer(handles[0]);
releaseGLBuffer(handles[1]);
releaseGLBuffer(handles[2]);
releaseGLBuffer(handles[3]);
releaseGLBuffer(handles[4]);
// destroy CUDA handles
FORGE_CUDA_CHECK(hipFree(cos_out));
FORGE_CUDA_CHECK(hipFree(tan_out));
FORGE_CUDA_CHECK(hipFree(colors_out));
FORGE_CUDA_CHECK(hipFree(alphas_out));
FORGE_CUDA_CHECK(hipFree(radii_out));
return 0;
}
__global__
void mapKernel(float* out, int functionCode, float frange_start, float dx)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
float x = frange_start + id*dx;
float y;
switch(functionCode) {
case 0: y = cos(x); break;
case 1: y = tan(x); break;
default: y = sin(x); break;
}
out[2*id+0] = x;
out[2*id+1] = y;
}
__global__
void colorsKernel(float* colors, hiprandState_t *states)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
colors[3*id+0] = hiprand_uniform(&states[id]);
colors[3*id+1] = hiprand_uniform(&states[id]);
colors[3*id+2] = hiprand_uniform(&states[id]);
}
__global__
void randKernel(float* out, hiprandState_t *states, float min, float scale)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
out[id] = hiprand_uniform(&states[id])*scale + min;
}
void kernel(float* dev_out, int functionCode,
float* colors, float* alphas, float* radii)
{
static const dim3 threads(32);
dim3 blocks(divup(DATA_SIZE, 32));
hipLaunchKernelGGL(( mapKernel), dim3(blocks), dim3(threads) , 0, 0, dev_out, functionCode, FRANGE_START, DX);
if (colors)
hipLaunchKernelGGL(( colorsKernel), dim3(blocks), dim3(threads) , 0, 0, colors, state);
if (alphas)
hipLaunchKernelGGL(( randKernel), dim3(blocks), dim3(threads) , 0, 0, alphas, state, 0, 1);
if (radii)
hipLaunchKernelGGL(( randKernel), dim3(blocks), dim3(threads) , 0, 0, radii, state, 20, 60);
}
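// The launches above use the hipLaunchKernelGGL macro with explicit dim3 arguments; the
// CUDA original that follows uses the <<<grid, block>>> chevrons for the same calls. Below
// is a small self-contained CUDA sketch (separate from the Forge demo, hypothetical names)
// of the usual divup-sized launch with an in-kernel bounds guard; the HIP spelling of the
// launch is shown in a comment.
#include <cuda_runtime.h>
#include <cstdio>
__global__ void fill_iota(int *out, int n) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < n)  // the grid is rounded up, so trailing threads must do nothing
        out[tid] = tid;
}
static inline int divup_sketch(int a, int b) { return (a + b - 1) / b; }
int main() {
    const int n = 62;  // deliberately not a multiple of the block size
    int *d_out = nullptr;
    cudaMalloc(&d_out, n * sizeof(int));
    fill_iota<<<divup_sketch(n, 32), 32>>>(d_out, n);
    // HIP spelling of the same launch:
    //   hipLaunchKernelGGL(fill_iota, dim3(divup_sketch(n, 32)), dim3(32), 0, 0, d_out, n);
    cudaDeviceSynchronize();
    int h_out[n];
    cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);
    printf("last element: %d\n", h_out[n - 1]);
    cudaFree(d_out);
    return 0;
}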
|
89b6771a53150111f592b392670f10c168b4b650.cu
|
/*******************************************************
* Copyright (c) 2015-2019, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <forge.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#define USE_FORGE_CUDA_COPY_HELPERS
#include <ComputeCopy.h>
#include <cstdio>
#include <iostream>
const unsigned DIMX = 1000;
const unsigned DIMY = 800;
static const float DX = 0.1f;
static const float FRANGE_START = 0.f;
static const float FRANGE_END = 2 * 3.141592f;
static const size_t DATA_SIZE = (size_t)((FRANGE_END - FRANGE_START) / DX);
curandState_t* state;
void kernel(float* dev_out, int functionCode,
float* colors, float* alphas, float* radii);
inline int divup(int a, int b)
{
return (a+b-1)/b;
}
__global__
void setupRandomKernel(curandState *states, unsigned long long seed)
{
unsigned tid = blockDim.x * blockIdx.x + threadIdx.x;
curand_init(seed, tid, 0, &states[tid]);
}
int main(void)
{
FORGE_CUDA_CHECK(cudaMalloc((void **)&state, DATA_SIZE*sizeof(curandState_t)));
setupRandomKernel <<< divup(DATA_SIZE,32), 32 >>> (state, 314567);
float *cos_out;
float *tan_out;
float *colors_out;
float *alphas_out;
float *radii_out;
FORGE_CUDA_CHECK(cudaMalloc((void**)&cos_out, sizeof(float) * DATA_SIZE * 2));
FORGE_CUDA_CHECK(cudaMalloc((void**)&tan_out, sizeof(float) * DATA_SIZE * 2));
FORGE_CUDA_CHECK(cudaMalloc((void**)&colors_out, sizeof(float) * DATA_SIZE * 3));
FORGE_CUDA_CHECK(cudaMalloc((void**)&alphas_out, sizeof(float) * DATA_SIZE));
FORGE_CUDA_CHECK(cudaMalloc((void**)&radii_out, sizeof(float) * DATA_SIZE));
/*
* First Forge call should be a window creation call
* so that necessary OpenGL context is created for any
* other forge::* object to be created successfully
*/
forge::Window wnd(DIMX, DIMY, "Bubble chart with Transparency Demo");
wnd.makeCurrent();
forge::Chart chart(FG_CHART_2D);
chart.setAxesLimits(FRANGE_START, FRANGE_END, -1.0f, 1.0f);
/* Create several plot objects which creates the necessary
* vertex buffer objects to hold the different plot types
*/
forge::Plot plt1 = chart.plot(DATA_SIZE, forge::f32, FG_PLOT_LINE, FG_MARKER_TRIANGLE);
forge::Plot plt2 = chart.plot(DATA_SIZE, forge::f32, FG_PLOT_LINE, FG_MARKER_CIRCLE);
/* Set plot colors */
plt1.setColor(FG_RED);
plt2.setColor(FG_GREEN); //use a forge predefined color
/* Set plot legends */
plt1.setLegend("Cosine");
plt2.setLegend("Tangent");
/* set plot global marker size */
plt1.setMarkerSize(20);
/* copy your data into the opengl buffer object exposed by
* forge::Plot class and then proceed to rendering.
* To help the users with copying the data from compute
* memory to display memory, Forge provides copy headers
* along with the library to help with this task
*/
GfxHandle* handles[5];
// create GL-CUDA interop buffers
createGLBuffer(&handles[0], plt1.vertices(), FORGE_VERTEX_BUFFER);
createGLBuffer(&handles[1], plt2.vertices(), FORGE_VERTEX_BUFFER);
createGLBuffer(&handles[2], plt2.colors(), FORGE_VERTEX_BUFFER);
createGLBuffer(&handles[3], plt2.alphas(), FORGE_VERTEX_BUFFER);
createGLBuffer(&handles[4], plt2.radii(), FORGE_VERTEX_BUFFER);
kernel(cos_out, 0, NULL, NULL, NULL);
kernel(tan_out, 1, colors_out, alphas_out, radii_out);
// copy the data from compute buffer to graphics buffer
copyToGLBuffer(handles[0], (ComputeResourceHandle)cos_out, plt1.verticesSize());
copyToGLBuffer(handles[1], (ComputeResourceHandle)tan_out, plt2.verticesSize());
/* update color value for tan graph */
copyToGLBuffer(handles[2], (ComputeResourceHandle)colors_out, plt2.colorsSize());
/* update alpha values for tan graph */
copyToGLBuffer(handles[3], (ComputeResourceHandle)alphas_out, plt2.alphasSize());
/* update marker sizes for tan graph markers */
copyToGLBuffer(handles[4], (ComputeResourceHandle)radii_out, plt2.radiiSize());
do {
wnd.draw(chart);
} while(!wnd.close());
// destroy GL-CUDA Interop buffer
releaseGLBuffer(handles[0]);
releaseGLBuffer(handles[1]);
releaseGLBuffer(handles[2]);
releaseGLBuffer(handles[3]);
releaseGLBuffer(handles[4]);
// destroy CUDA handles
FORGE_CUDA_CHECK(cudaFree(cos_out));
FORGE_CUDA_CHECK(cudaFree(tan_out));
FORGE_CUDA_CHECK(cudaFree(colors_out));
FORGE_CUDA_CHECK(cudaFree(alphas_out));
FORGE_CUDA_CHECK(cudaFree(radii_out));
return 0;
}
__global__
void mapKernel(float* out, int functionCode, float frange_start, float dx)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
float x = frange_start + id*dx;
float y;
switch(functionCode) {
case 0: y = cos(x); break;
case 1: y = tan(x); break;
default: y = sin(x); break;
}
out[2*id+0] = x;
out[2*id+1] = y;
}
__global__
void colorsKernel(float* colors, curandState *states)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
colors[3*id+0] = curand_uniform(&states[id]);
colors[3*id+1] = curand_uniform(&states[id]);
colors[3*id+2] = curand_uniform(&states[id]);
}
__global__
void randKernel(float* out, curandState *states, float min, float scale)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
out[id] = curand_uniform(&states[id])*scale + min;
}
void kernel(float* dev_out, int functionCode,
float* colors, float* alphas, float* radii)
{
static const dim3 threads(32);
dim3 blocks(divup(DATA_SIZE, 32));
mapKernel<<< blocks, threads >>>(dev_out, functionCode, FRANGE_START, DX);
if (colors)
colorsKernel<<< blocks, threads >>>(colors, state);
if (alphas)
randKernel<<< blocks, threads >>>(alphas, state, 0, 1);
if (radii)
randKernel<<< blocks, threads >>>(radii, state, 20, 60);
}
|
8885c05e140823b3cf568a067b1a5cd170c0b176.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel2;
int xdim0_reset_field_kernel2_h = -1;
__constant__ int ydim0_reset_field_kernel2;
int ydim0_reset_field_kernel2_h = -1;
__constant__ int xdim1_reset_field_kernel2;
int xdim1_reset_field_kernel2_h = -1;
__constant__ int ydim1_reset_field_kernel2;
int ydim1_reset_field_kernel2_h = -1;
__constant__ int xdim2_reset_field_kernel2;
int xdim2_reset_field_kernel2_h = -1;
__constant__ int ydim2_reset_field_kernel2;
int ydim2_reset_field_kernel2_h = -1;
__constant__ int xdim3_reset_field_kernel2;
int xdim3_reset_field_kernel2_h = -1;
__constant__ int ydim3_reset_field_kernel2;
int ydim3_reset_field_kernel2_h = -1;
__constant__ int xdim4_reset_field_kernel2;
int xdim4_reset_field_kernel2_h = -1;
__constant__ int ydim4_reset_field_kernel2;
int ydim4_reset_field_kernel2_h = -1;
__constant__ int xdim5_reset_field_kernel2;
int xdim5_reset_field_kernel2_h = -1;
__constant__ int ydim5_reset_field_kernel2;
int ydim5_reset_field_kernel2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x, y, z) \
(x + xdim0_reset_field_kernel2 * (y) + \
xdim0_reset_field_kernel2 * ydim0_reset_field_kernel2 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_reset_field_kernel2 * (y) + \
xdim1_reset_field_kernel2 * ydim1_reset_field_kernel2 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_reset_field_kernel2 * (y) + \
xdim2_reset_field_kernel2 * ydim2_reset_field_kernel2 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_reset_field_kernel2 * (y) + \
xdim3_reset_field_kernel2 * ydim3_reset_field_kernel2 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_reset_field_kernel2 * (y) + \
xdim4_reset_field_kernel2 * ydim4_reset_field_kernel2 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_reset_field_kernel2 * (y) + \
xdim5_reset_field_kernel2 * ydim5_reset_field_kernel2 * (z))
// user function
__device__
void
reset_field_kernel2(double *xvel0, const double *xvel1, double *yvel0,
const double *yvel1, double *zvel0,
const double *zvel1) {
xvel0[OPS_ACC0(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, 0)];
yvel0[OPS_ACC2(0, 0, 0)] = yvel1[OPS_ACC3(0, 0, 0)];
zvel0[OPS_ACC4(0, 0, 0)] = zvel1[OPS_ACC5(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void
ops_reset_field_kernel2(double *__restrict arg0, const double *__restrict arg1,
double *__restrict arg2, const double *__restrict arg3,
double *__restrict arg4, const double *__restrict arg5,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_reset_field_kernel2 +
idx_z * 1 * 1 * xdim0_reset_field_kernel2 * ydim0_reset_field_kernel2;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_reset_field_kernel2 +
idx_z * 1 * 1 * xdim1_reset_field_kernel2 * ydim1_reset_field_kernel2;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_reset_field_kernel2 +
idx_z * 1 * 1 * xdim2_reset_field_kernel2 * ydim2_reset_field_kernel2;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_reset_field_kernel2 +
idx_z * 1 * 1 * xdim3_reset_field_kernel2 * ydim3_reset_field_kernel2;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_reset_field_kernel2 +
idx_z * 1 * 1 * xdim4_reset_field_kernel2 * ydim4_reset_field_kernel2;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_reset_field_kernel2 +
idx_z * 1 * 1 * xdim5_reset_field_kernel2 * ydim5_reset_field_kernel2;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel2(arg0, arg1, arg2, arg3, arg4, arg5);
}
}
// host stub function
void ops_par_loop_reset_field_kernel2(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 6, range, 2))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(2, "reset_field_kernel2");
OPS_kernels[2].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel2_h ||
ydim0 != ydim0_reset_field_kernel2_h ||
xdim1 != xdim1_reset_field_kernel2_h ||
ydim1 != ydim1_reset_field_kernel2_h ||
xdim2 != xdim2_reset_field_kernel2_h ||
ydim2 != ydim2_reset_field_kernel2_h ||
xdim3 != xdim3_reset_field_kernel2_h ||
ydim3 != ydim3_reset_field_kernel2_h ||
xdim4 != xdim4_reset_field_kernel2_h ||
ydim4 != ydim4_reset_field_kernel2_h ||
xdim5 != xdim5_reset_field_kernel2_h ||
ydim5 != ydim5_reset_field_kernel2_h) {
hipMemcpyToSymbol(xdim0_reset_field_kernel2, &xdim0, sizeof(int));
xdim0_reset_field_kernel2_h = xdim0;
hipMemcpyToSymbol(ydim0_reset_field_kernel2, &ydim0, sizeof(int));
ydim0_reset_field_kernel2_h = ydim0;
hipMemcpyToSymbol(xdim1_reset_field_kernel2, &xdim1, sizeof(int));
xdim1_reset_field_kernel2_h = xdim1;
hipMemcpyToSymbol(ydim1_reset_field_kernel2, &ydim1, sizeof(int));
ydim1_reset_field_kernel2_h = ydim1;
hipMemcpyToSymbol(xdim2_reset_field_kernel2, &xdim2, sizeof(int));
xdim2_reset_field_kernel2_h = xdim2;
hipMemcpyToSymbol(ydim2_reset_field_kernel2, &ydim2, sizeof(int));
ydim2_reset_field_kernel2_h = ydim2;
hipMemcpyToSymbol(xdim3_reset_field_kernel2, &xdim3, sizeof(int));
xdim3_reset_field_kernel2_h = xdim3;
hipMemcpyToSymbol(ydim3_reset_field_kernel2, &ydim3, sizeof(int));
ydim3_reset_field_kernel2_h = ydim3;
hipMemcpyToSymbol(xdim4_reset_field_kernel2, &xdim4, sizeof(int));
xdim4_reset_field_kernel2_h = xdim4;
hipMemcpyToSymbol(ydim4_reset_field_kernel2, &ydim4, sizeof(int));
ydim4_reset_field_kernel2_h = ydim4;
hipMemcpyToSymbol(xdim5_reset_field_kernel2, &xdim5, sizeof(int));
xdim5_reset_field_kernel2_h = xdim5;
hipMemcpyToSymbol(ydim5_reset_field_kernel2, &ydim5, sizeof(int));
ydim5_reset_field_kernel2_h = ydim5;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
char *p_a[6];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[2].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_reset_field_kernel2), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[2].time += t1 - t2;
}
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[4], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[2].mpi_time += t2 - t1;
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
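// The host stub above keeps a *_h shadow of every extent and calls hipMemcpyToSymbol only
// when a value has actually changed, so repeated invocations of the loop skip the constant
// memory upload. A stripped-down, standalone CUDA sketch of that caching idiom (one
// hypothetical extent, cudaMemcpyToSymbol spelling, built separately) could look like this:
#include <cuda_runtime.h>
#include <cstdio>
__constant__ int xdim_demo;   // device-side copy read by kernels
static int xdim_demo_h = -1;  // host-side shadow of the last uploaded value
__global__ void read_xdim(int *out) { *out = xdim_demo; }
// upload the extent only when it differs from the cached shadow value
static void set_xdim(int xdim) {
  if (xdim != xdim_demo_h) {
    cudaMemcpyToSymbol(xdim_demo, &xdim, sizeof(int));
    xdim_demo_h = xdim;
  }
}
int main() {
  int *d_out = nullptr;
  cudaMalloc(&d_out, sizeof(int));
  set_xdim(128);  // first call uploads
  set_xdim(128);  // second call is skipped by the shadow check
  read_xdim<<<1, 1>>>(d_out);
  cudaDeviceSynchronize();
  int h_out = 0;
  cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
  printf("xdim seen by the kernel: %d\n", h_out);
  cudaFree(d_out);
  return 0;
}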
|
8885c05e140823b3cf568a067b1a5cd170c0b176.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel2;
int xdim0_reset_field_kernel2_h = -1;
__constant__ int ydim0_reset_field_kernel2;
int ydim0_reset_field_kernel2_h = -1;
__constant__ int xdim1_reset_field_kernel2;
int xdim1_reset_field_kernel2_h = -1;
__constant__ int ydim1_reset_field_kernel2;
int ydim1_reset_field_kernel2_h = -1;
__constant__ int xdim2_reset_field_kernel2;
int xdim2_reset_field_kernel2_h = -1;
__constant__ int ydim2_reset_field_kernel2;
int ydim2_reset_field_kernel2_h = -1;
__constant__ int xdim3_reset_field_kernel2;
int xdim3_reset_field_kernel2_h = -1;
__constant__ int ydim3_reset_field_kernel2;
int ydim3_reset_field_kernel2_h = -1;
__constant__ int xdim4_reset_field_kernel2;
int xdim4_reset_field_kernel2_h = -1;
__constant__ int ydim4_reset_field_kernel2;
int ydim4_reset_field_kernel2_h = -1;
__constant__ int xdim5_reset_field_kernel2;
int xdim5_reset_field_kernel2_h = -1;
__constant__ int ydim5_reset_field_kernel2;
int ydim5_reset_field_kernel2_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x, y, z) \
(x + xdim0_reset_field_kernel2 * (y) + \
xdim0_reset_field_kernel2 * ydim0_reset_field_kernel2 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_reset_field_kernel2 * (y) + \
xdim1_reset_field_kernel2 * ydim1_reset_field_kernel2 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_reset_field_kernel2 * (y) + \
xdim2_reset_field_kernel2 * ydim2_reset_field_kernel2 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_reset_field_kernel2 * (y) + \
xdim3_reset_field_kernel2 * ydim3_reset_field_kernel2 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_reset_field_kernel2 * (y) + \
xdim4_reset_field_kernel2 * ydim4_reset_field_kernel2 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_reset_field_kernel2 * (y) + \
xdim5_reset_field_kernel2 * ydim5_reset_field_kernel2 * (z))
// user function
__device__
void
reset_field_kernel2(double *xvel0, const double *xvel1, double *yvel0,
const double *yvel1, double *zvel0,
const double *zvel1) {
xvel0[OPS_ACC0(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, 0)];
yvel0[OPS_ACC2(0, 0, 0)] = yvel1[OPS_ACC3(0, 0, 0)];
zvel0[OPS_ACC4(0, 0, 0)] = zvel1[OPS_ACC5(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void
ops_reset_field_kernel2(double *__restrict arg0, const double *__restrict arg1,
double *__restrict arg2, const double *__restrict arg3,
double *__restrict arg4, const double *__restrict arg5,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_reset_field_kernel2 +
idx_z * 1 * 1 * xdim0_reset_field_kernel2 * ydim0_reset_field_kernel2;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_reset_field_kernel2 +
idx_z * 1 * 1 * xdim1_reset_field_kernel2 * ydim1_reset_field_kernel2;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_reset_field_kernel2 +
idx_z * 1 * 1 * xdim2_reset_field_kernel2 * ydim2_reset_field_kernel2;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_reset_field_kernel2 +
idx_z * 1 * 1 * xdim3_reset_field_kernel2 * ydim3_reset_field_kernel2;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_reset_field_kernel2 +
idx_z * 1 * 1 * xdim4_reset_field_kernel2 * ydim4_reset_field_kernel2;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_reset_field_kernel2 +
idx_z * 1 * 1 * xdim5_reset_field_kernel2 * ydim5_reset_field_kernel2;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel2(arg0, arg1, arg2, arg3, arg4, arg5);
}
}
// host stub function
void ops_par_loop_reset_field_kernel2(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 6, range, 2))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(2, "reset_field_kernel2");
OPS_kernels[2].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel2_h ||
ydim0 != ydim0_reset_field_kernel2_h ||
xdim1 != xdim1_reset_field_kernel2_h ||
ydim1 != ydim1_reset_field_kernel2_h ||
xdim2 != xdim2_reset_field_kernel2_h ||
ydim2 != ydim2_reset_field_kernel2_h ||
xdim3 != xdim3_reset_field_kernel2_h ||
ydim3 != ydim3_reset_field_kernel2_h ||
xdim4 != xdim4_reset_field_kernel2_h ||
ydim4 != ydim4_reset_field_kernel2_h ||
xdim5 != xdim5_reset_field_kernel2_h ||
ydim5 != ydim5_reset_field_kernel2_h) {
cudaMemcpyToSymbol(xdim0_reset_field_kernel2, &xdim0, sizeof(int));
xdim0_reset_field_kernel2_h = xdim0;
cudaMemcpyToSymbol(ydim0_reset_field_kernel2, &ydim0, sizeof(int));
ydim0_reset_field_kernel2_h = ydim0;
cudaMemcpyToSymbol(xdim1_reset_field_kernel2, &xdim1, sizeof(int));
xdim1_reset_field_kernel2_h = xdim1;
cudaMemcpyToSymbol(ydim1_reset_field_kernel2, &ydim1, sizeof(int));
ydim1_reset_field_kernel2_h = ydim1;
cudaMemcpyToSymbol(xdim2_reset_field_kernel2, &xdim2, sizeof(int));
xdim2_reset_field_kernel2_h = xdim2;
cudaMemcpyToSymbol(ydim2_reset_field_kernel2, &ydim2, sizeof(int));
ydim2_reset_field_kernel2_h = ydim2;
cudaMemcpyToSymbol(xdim3_reset_field_kernel2, &xdim3, sizeof(int));
xdim3_reset_field_kernel2_h = xdim3;
cudaMemcpyToSymbol(ydim3_reset_field_kernel2, &ydim3, sizeof(int));
ydim3_reset_field_kernel2_h = ydim3;
cudaMemcpyToSymbol(xdim4_reset_field_kernel2, &xdim4, sizeof(int));
xdim4_reset_field_kernel2_h = xdim4;
cudaMemcpyToSymbol(ydim4_reset_field_kernel2, &ydim4, sizeof(int));
ydim4_reset_field_kernel2_h = ydim4;
cudaMemcpyToSymbol(xdim5_reset_field_kernel2, &xdim5, sizeof(int));
xdim5_reset_field_kernel2_h = xdim5;
cudaMemcpyToSymbol(ydim5_reset_field_kernel2, &ydim5, sizeof(int));
ydim5_reset_field_kernel2_h = ydim5;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
char *p_a[6];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[2].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_reset_field_kernel2<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[2].time += t1 - t2;
}
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[4], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[2].mpi_time += t2 - t1;
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
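// The OPS_ACC macros above linearise a 3D offset as x + xdim*y + xdim*ydim*z around a
// per-thread base pointer. A tiny host-only check of that arithmetic (hypothetical extents,
// no OPS machinery, compiled separately) might be:
#include <cassert>
#include <cstdio>
// same formula as OPS_ACC0(x, y, z): x + xdim * (y) + xdim * ydim * (z)
static inline int flat3(int x, int y, int z, int xdim, int ydim) {
  return x + xdim * y + xdim * ydim * z;
}
int main() {
  const int xdim = 4, ydim = 3, zdim = 2;
  int seen[4 * 3 * 2] = {0};
  // walking the box in z/y/x order must touch every cell exactly once
  for (int z = 0; z < zdim; ++z)
    for (int y = 0; y < ydim; ++y)
      for (int x = 0; x < xdim; ++x)
        seen[flat3(x, y, z, xdim, ydim)] += 1;
  for (int i = 0; i < xdim * ydim * zdim; ++i)
    assert(seen[i] == 1);
  printf("flat3 covers all %d cells exactly once\n", xdim * ydim * zdim);
  return 0;
}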
|
34085f3186446239f5a1ac6db9adb2790d84d5b9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <omp.h>
#include "cuda_util.h"
#include <thrust/device_vector.h>
#define THREAD_NUM 128
#define BLOCKS_SCHED 2
#define SIZE_WARP 32
__device__ inline int row_index( unsigned int i, unsigned int M ){
double m = M;
double row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(double)i - 7) )) / -2;
if( row == (double)(int) row ) row -= 1;
return (unsigned int) row;
}
__device__ inline int column_index( unsigned int i, unsigned int M ){
unsigned int row = row_index( i, M);
return i - M * row + row*(row+1) / 2;
}
__global__ void computeCgh(int * vetor, int * result, int n, int rank, int size) {
int total_comp = n*(n+1)/2;
    int tid = ((total_comp/size)*rank) + threadIdx.x + blockIdx.x * blockDim.x; // thread identification
int g = row_index(tid,n);
int h = column_index(tid,n);
int max_so_far = INT_MIN, max_ending_here = INT_MIN;
extern __shared__ int max_block[];
if(threadIdx.x == 0)
max_block[0] = INT_MIN;
__syncthreads();
if(tid < total_comp && h >= g) {
//printf("rank=%d tid=%d Cgh=%d,%d\n",rank,tid,g,h);
for(int i = 0; i < n; i++) {
int value = vetor[i*n + h] - (g == 0 ? 0 : vetor[i*n + g - 1]);
if(max_ending_here < 0) {
max_ending_here = value;
}
else {
max_ending_here += value;
}
if(max_ending_here >= max_so_far ) {
max_so_far = max_ending_here;
}
}
atomicMax(&max_block[0],max_so_far);
}
__syncthreads();
if(threadIdx.x == 0)
atomicMax(&result[0],max_block[0]);
}
int main() {
int n;
scanf("%d",&n);
int sqrN = n*n;
int * v;
int * keys;
//device 0 by default
hipMallocManaged(&v, sqrN*sizeof(int));
hipMallocManaged(&keys, sqrN*sizeof(int));
int j = 0;
for(int i = 1; i < sqrN+1; i++)
{
keys[i-1] = j;
scanf("%d",&v[i-1]);
if(i % n == 0)
j++;
}
/*for(int i = 1; i <= sqrN; i++) {
printf ("%d ",v[i-1]);
if(i % n == 0)
printf("\n");
}
printf("\n");*/
int size;
HANDLE_ERROR( hipGetDeviceCount(&size));
int * globalmax;
hipMallocManaged(&globalmax, size*sizeof(int));
#pragma omp parallel num_threads(size) default(shared)
{
const int rank = omp_get_thread_num();
int partition = n % size != 0 ? (int)(n/size) + 1 : n/size;
partition = partition != 0 ? partition : 1;
HANDLE_ERROR( hipSetDevice(rank) );
int total_comp = n*(n+1)/2;
unsigned tnumb = total_comp / THREAD_NUM > 0 ? THREAD_NUM : 32;
unsigned bnumb = ((int)(total_comp / tnumb / size)) + 1;
dim3 threadsPorBloco(tnumb);
dim3 blocosPorGrid(bnumb);
int startIndex = rank * partition * n;
int endIndex = rank != size - 1 ? (rank+1) * partition * n : sqrN + 1;
//printf("rank=%d partition=%d si=%d ei=%d\n",rank,partition,startIndex,endIndex);
float time;
hipEvent_t start,stop;
HANDLE_ERROR( hipEventCreate(&start) );
HANDLE_ERROR( hipEventCreate(&stop) );
HANDLE_ERROR( hipEventRecord(start, 0) );
thrust::inclusive_scan_by_key(keys + startIndex, keys + endIndex, v + startIndex ,v + startIndex);
CudaCheckError();
#pragma omp barrier
hipLaunchKernelGGL(( computeCgh), dim3(blocosPorGrid),dim3(threadsPorBloco),sizeof(int), 0, v,&globalmax[rank],n, rank, size);
HANDLE_ERROR( hipDeviceSynchronize() );
#pragma omp barrier
for(int i = 1; i < size; i++) {
if(globalmax[i] > globalmax[0]) {
globalmax[0] = globalmax[i];
}
}
HANDLE_ERROR( hipEventRecord(stop, 0) );
HANDLE_ERROR( hipEventSynchronize(stop) );
HANDLE_ERROR( hipEventElapsedTime(&time, start, stop) );
#pragma omp single
{
//printf("\nO resultado e: %d\n",globalmax[0]);
//printf("O tempo foi de: %.9f ms para a mmax2d\n", time);
printf("mmax2d_multigpu_um: %d, tempo: %.9fms\n",globalmax[0],time);
}
}
hipSetDevice(0);
/*for(int i = 1; i <= sqrN; i++) {
printf ("%d ",v[i-1]);
if(i % n == 0)
printf("\n");
}
printf("\n");
for(int i = 0; i < size; i++) {
printf("%d\n",globalmax[i]);
}*/
hipFree(v);
hipFree(keys);
hipFree(globalmax);
return 0;
}
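// computeCgh above reduces in two stages: every active thread atomicMax-es its running
// maximum into a single shared-memory slot, and thread 0 of each block then atomicMax-es
// that slot into the global result. A standalone CUDA sketch of that idiom on toy data
// (hypothetical names, built separately) could be:
#include <cuda_runtime.h>
#include <climits>
#include <cstdio>
__global__ void block_then_global_max(const int *in, int *result, int n) {
  extern __shared__ int block_max[];  // one int per block, sized at launch
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (threadIdx.x == 0) block_max[0] = INT_MIN;
  __syncthreads();
  if (tid < n) atomicMax(&block_max[0], in[tid]);          // stage 1: per-block maximum
  __syncthreads();
  if (threadIdx.x == 0) atomicMax(result, block_max[0]);   // stage 2: global maximum
}
int main() {
  const int n = 1000;
  int h_in[n];
  for (int i = 0; i < n; ++i) h_in[i] = (i * 37) % 997;    // arbitrary values
  int *d_in, *d_result;
  int init = INT_MIN;
  cudaMalloc(&d_in, n * sizeof(int));
  cudaMalloc(&d_result, sizeof(int));
  cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_result, &init, sizeof(int), cudaMemcpyHostToDevice);
  block_then_global_max<<<(n + 127) / 128, 128, sizeof(int)>>>(d_in, d_result, n);
  int h_result = 0;
  cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
  printf("max = %d\n", h_result);
  cudaFree(d_in);
  cudaFree(d_result);
  return 0;
}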
|
34085f3186446239f5a1ac6db9adb2790d84d5b9.cu
|
#include <stdio.h>
#include <omp.h>
#include "cuda_util.h"
#include <thrust/device_vector.h>
#define THREAD_NUM 128
#define BLOCKS_SCHED 2
#define SIZE_WARP 32
__device__ inline int row_index( unsigned int i, unsigned int M ){
double m = M;
double row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(double)i - 7) )) / -2;
if( row == (double)(int) row ) row -= 1;
return (unsigned int) row;
}
__device__ inline int column_index( unsigned int i, unsigned int M ){
unsigned int row = row_index( i, M);
return i - M * row + row*(row+1) / 2;
}
__global__ void computeCgh(int * vetor, int * result, int n, int rank, int size) {
int total_comp = n*(n+1)/2;
    int tid = ((total_comp/size)*rank) + threadIdx.x + blockIdx.x * blockDim.x; // thread identification
int g = row_index(tid,n);
int h = column_index(tid,n);
int max_so_far = INT_MIN, max_ending_here = INT_MIN;
extern __shared__ int max_block[];
if(threadIdx.x == 0)
max_block[0] = INT_MIN;
__syncthreads();
if(tid < total_comp && h >= g) {
//printf("rank=%d tid=%d Cgh=%d,%d\n",rank,tid,g,h);
for(int i = 0; i < n; i++) {
int value = vetor[i*n + h] - (g == 0 ? 0 : vetor[i*n + g - 1]);
if(max_ending_here < 0) {
max_ending_here = value;
}
else {
max_ending_here += value;
}
if(max_ending_here >= max_so_far ) {
max_so_far = max_ending_here;
}
}
atomicMax(&max_block[0],max_so_far);
}
__syncthreads();
if(threadIdx.x == 0)
atomicMax(&result[0],max_block[0]);
}
int main() {
int n;
scanf("%d",&n);
int sqrN = n*n;
int * v;
int * keys;
//device 0 by default
cudaMallocManaged(&v, sqrN*sizeof(int));
cudaMallocManaged(&keys, sqrN*sizeof(int));
int j = 0;
for(int i = 1; i < sqrN+1; i++)
{
keys[i-1] = j;
scanf("%d",&v[i-1]);
if(i % n == 0)
j++;
}
/*for(int i = 1; i <= sqrN; i++) {
printf ("%d ",v[i-1]);
if(i % n == 0)
printf("\n");
}
printf("\n");*/
int size;
HANDLE_ERROR( cudaGetDeviceCount(&size));
int * globalmax;
cudaMallocManaged(&globalmax, size*sizeof(int));
#pragma omp parallel num_threads(size) default(shared)
{
const int rank = omp_get_thread_num();
int partition = n % size != 0 ? (int)(n/size) + 1 : n/size;
partition = partition != 0 ? partition : 1;
HANDLE_ERROR( cudaSetDevice(rank) );
int total_comp = n*(n+1)/2;
unsigned tnumb = total_comp / THREAD_NUM > 0 ? THREAD_NUM : 32;
unsigned bnumb = ((int)(total_comp / tnumb / size)) + 1;
dim3 threadsPorBloco(tnumb);
dim3 blocosPorGrid(bnumb);
int startIndex = rank * partition * n;
int endIndex = rank != size - 1 ? (rank+1) * partition * n : sqrN + 1;
//printf("rank=%d partition=%d si=%d ei=%d\n",rank,partition,startIndex,endIndex);
float time;
cudaEvent_t start,stop;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
HANDLE_ERROR( cudaEventRecord(start, 0) );
thrust::inclusive_scan_by_key(keys + startIndex, keys + endIndex, v + startIndex ,v + startIndex);
CudaCheckError();
#pragma omp barrier
computeCgh<<<blocosPorGrid,threadsPorBloco,sizeof(int)>>>(v,&globalmax[rank],n, rank, size);
HANDLE_ERROR( cudaThreadSynchronize() );
#pragma omp barrier
for(int i = 1; i < size; i++) {
if(globalmax[i] > globalmax[0]) {
globalmax[0] = globalmax[i];
}
}
HANDLE_ERROR( cudaEventRecord(stop, 0) );
HANDLE_ERROR( cudaEventSynchronize(stop) );
HANDLE_ERROR( cudaEventElapsedTime(&time, start, stop) );
#pragma omp single
{
//printf("\nO resultado e: %d\n",globalmax[0]);
//printf("O tempo foi de: %.9f ms para a mmax2d\n", time);
printf("mmax2d_multigpu_um: %d, tempo: %.9fms\n",globalmax[0],time);
}
}
cudaSetDevice(0);
/*for(int i = 1; i <= sqrN; i++) {
printf ("%d ",v[i-1]);
if(i % n == 0)
printf("\n");
}
printf("\n");
for(int i = 0; i < size; i++) {
printf("%d\n",globalmax[i]);
}*/
cudaFree(v);
cudaFree(keys);
cudaFree(globalmax);
return 0;
}
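// row_index/column_index above invert the row-major enumeration of the upper triangle of an
// n-by-n matrix, so each thread id maps to one (g, h) pair with h >= g. The following
// host-only sketch (small n, no CUDA needed, compiled separately) replays the same
// closed-form formulas and checks them against a direct enumeration:
#include <cassert>
#include <cmath>
#include <cstdio>
// host replicas of the device-side row_index / column_index formulas
static int row_index_host(unsigned int i, unsigned int M) {
  double m = M;
  double row = (-2 * m - 1 + std::sqrt(4 * m * (m + 1) - 8 * (double)i - 7)) / -2;
  if (row == (double)(int)row) row -= 1;
  return (unsigned int)row;
}
static int column_index_host(unsigned int i, unsigned int M) {
  unsigned int row = row_index_host(i, M);
  return i - M * row + row * (row + 1) / 2;
}
int main() {
  const unsigned int n = 7;
  unsigned int tid = 0;
  // direct enumeration of the upper triangle must agree with the formulas
  for (unsigned int g = 0; g < n; ++g) {
    for (unsigned int h = g; h < n; ++h, ++tid) {
      assert(row_index_host(tid, n) == (int)g);
      assert(column_index_host(tid, n) == (int)h);
    }
  }
  assert(tid == n * (n + 1) / 2);
  printf("mapping verified for all %u pairs\n", tid);
  return 0;
}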
|
c05efa4ac0cb12d5950455fcc728f3a2aaa028b7.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include "dali/operators/image/remap/warp_affine_params.h"
namespace dali {
namespace {
template <int ndims>
__global__ void InvertTransformsKernel(WarpAffineParams<ndims> *output,
const WarpAffineParams<ndims> **input, int count) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x)
output[i] = input[i]->inv();
}
template <int ndims>
__global__ void CopyTransformsKernel(WarpAffineParams<ndims> *output,
const WarpAffineParams<ndims> **input, int count) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x)
output[i] = *input[i];
}
template <int ndims>
void InvertTransforms(WarpAffineParams<ndims> *output, const WarpAffineParams<ndims> **input,
int count, hipStream_t stream) {
int blocks = div_ceil(count, 512);
int threads = ::min(count, 512);
hipLaunchKernelGGL(( InvertTransformsKernel<ndims>), dim3(blocks), dim3(threads), 0, stream, output, input, count);
}
template <int ndims>
void CopyTransforms(WarpAffineParams<ndims> *output, const WarpAffineParams<ndims> **input,
int count, hipStream_t stream) {
int blocks = div_ceil(count, 512);
int threads = ::min(count, 512);
hipLaunchKernelGGL(( CopyTransformsKernel<ndims>), dim3(blocks), dim3(threads), 0, stream, output, input, count);
}
} // namespace
template <>
void CopyTransformsGPU<2, true>(WarpAffineParams<2> *output, const WarpAffineParams<2> **input,
int count, hipStream_t stream) {
InvertTransforms<2>(output, input, count, stream);
}
template <>
void CopyTransformsGPU<3, true>(WarpAffineParams<3> *output, const WarpAffineParams<3> **input,
int count, hipStream_t stream) {
InvertTransforms<3>(output, input, count, stream);
}
template <>
void CopyTransformsGPU<2, false>(WarpAffineParams<2> *output, const WarpAffineParams<2> **input,
int count, hipStream_t stream) {
CopyTransforms<2>(output, input, count, stream);
}
template <>
void CopyTransformsGPU<3, false>(WarpAffineParams<3> *output, const WarpAffineParams<3> **input,
int count, hipStream_t stream) {
CopyTransforms<3>(output, input, count, stream);
}
} // namespace dali
|
c05efa4ac0cb12d5950455fcc728f3a2aaa028b7.cu
|
// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime.h>
#include "dali/operators/image/remap/warp_affine_params.h"
namespace dali {
namespace {
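// Grid-stride kernels: each output parameter set is either the inverse or a verbatim copy of the corresponding input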
template <int ndims>
__global__ void InvertTransformsKernel(WarpAffineParams<ndims> *output,
const WarpAffineParams<ndims> **input, int count) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x)
output[i] = input[i]->inv();
}
template <int ndims>
__global__ void CopyTransformsKernel(WarpAffineParams<ndims> *output,
const WarpAffineParams<ndims> **input, int count) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x)
output[i] = *input[i];
}
template <int ndims>
void InvertTransforms(WarpAffineParams<ndims> *output, const WarpAffineParams<ndims> **input,
int count, cudaStream_t stream) {
int blocks = div_ceil(count, 512);
int threads = std::min(count, 512);
InvertTransformsKernel<ndims><<<blocks, threads, 0, stream>>>(output, input, count);
}
template <int ndims>
void CopyTransforms(WarpAffineParams<ndims> *output, const WarpAffineParams<ndims> **input,
int count, cudaStream_t stream) {
int blocks = div_ceil(count, 512);
int threads = std::min(count, 512);
CopyTransformsKernel<ndims><<<blocks, threads, 0, stream>>>(output, input, count);
}
} // namespace
template <>
void CopyTransformsGPU<2, true>(WarpAffineParams<2> *output, const WarpAffineParams<2> **input,
int count, cudaStream_t stream) {
InvertTransforms<2>(output, input, count, stream);
}
template <>
void CopyTransformsGPU<3, true>(WarpAffineParams<3> *output, const WarpAffineParams<3> **input,
int count, cudaStream_t stream) {
InvertTransforms<3>(output, input, count, stream);
}
template <>
void CopyTransformsGPU<2, false>(WarpAffineParams<2> *output, const WarpAffineParams<2> **input,
int count, cudaStream_t stream) {
CopyTransforms<2>(output, input, count, stream);
}
template <>
void CopyTransformsGPU<3, false>(WarpAffineParams<3> *output, const WarpAffineParams<3> **input,
int count, cudaStream_t stream) {
CopyTransforms<3>(output, input, count, stream);
}
} // namespace dali
|
effab4fde14c9848ec55ed44ded91374e0a3fd02.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* earlyDetection.cu
*
* Created on: Sep 16, 2015
* Author: banafsheh
*/
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstring>
#include <vector>
#include "testPolygroup.h"
int earlyDetection(std::vector<double>& inputNeuronVec,
std::vector<double>& inputTimeVec, std::vector<std::string>& currentModel,
double* a, double* d, double dpre[][51], double delay[][23],
double ppre[][51], double post[][23], double pp[][23], double pre[][51],
double s[][23], bool* pngOutput, int anchors[][3], int N, int D,
int T) {
int SIZE=955;
//double* inputNueronArray = new double[inputNeuronVec.size()];
//int* inputTimeArray = new int[inputTimeVec.size()];
double* inputNeur = &inputNeuronVec[0];
double* inputTime = &inputTimeVec[0];
//double* inputNueronArray= new double [inputNeuronVec.size()];
int i = 0, j = 0, k = 0, i1 = 0;
/*for (i = 1; i < 8; i++) {
for (j = i + 1; j < 9; j++) {
for (k = j + 1; k < 10; k++) {
anchors[i1][0] = i;
anchors[i1][1] = j;
anchors[i1][2] = k;
i1++;
}
}
}*/
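// Device-side mirrors of the host arrays (226 rows, with 51-wide "pre" and 23-wide "post" connection tables)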
double* dev_a;
double* dev_d;
double* dev_dpre;
double* dev_delay;
double* dev_ppre;
double* dev_post;
double* dev_pp;
double* dev_pre;
double* dev_s;
bool* dev_pngOutput;
int* dev_anchors;
int* dev_N;
int* dev_D;
int* dev_T;
double* dev_inputTime;
double* dev_inputNeur;
hipError_t err = hipMalloc((void**) &dev_a, 226 * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_d, 226 * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_dpre, 226 * 51 * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_delay, 226 * 23 * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_ppre, 226 * 51 * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_post, 226 * 23 * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_pp, 226 * 23 * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_pre, 226 * 51 * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_s, 226 * 23 * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_pngOutput, SIZE * sizeof(bool));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_anchors, SIZE * 3 * sizeof(int));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_inputNeur, inputNeuronVec.size() * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_inputTime, inputTimeVec.size() * sizeof(double));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_N, 1 * sizeof(int));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_D, 1 * sizeof(int));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
err = hipMalloc((void**) &dev_T, 1 * sizeof(int));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
//todo please complete this part
hipMemcpy(dev_a, a, 226 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_d, d, 226 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_dpre, dpre, 226 * 51 * sizeof(double),
hipMemcpyHostToDevice);
hipMemcpy(dev_delay, delay, 226 * 23 * sizeof(double),
hipMemcpyHostToDevice);
hipMemcpy(dev_ppre, ppre, 226 * 51 * sizeof(double),
hipMemcpyHostToDevice);
hipMemcpy(dev_post, post, 226 * 23 * sizeof(double),
hipMemcpyHostToDevice);
hipMemcpy(dev_pp, pp, 226 * 23 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_pre, pre, 226 * 51 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_s, s, 226 * 23 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_pngOutput, pngOutput, SIZE * sizeof(bool),
hipMemcpyHostToDevice);
hipMemcpy(dev_anchors, anchors, SIZE * 3 * sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(dev_N, &N, 1 * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_D, &D, 1 * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_T, &T, 1 * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_inputTime, inputTime, inputTimeVec.size() * sizeof(double),
hipMemcpyHostToDevice);
hipMemcpy(dev_inputNeur, inputNeur, inputNeuronVec.size() * sizeof(double),
hipMemcpyHostToDevice);
//(SIZE + 255) / 256, 256
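// Note: launched with a single block and a single thread; the commented-out configuration above would cover all SIZE anchors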
hipLaunchKernelGGL(( testPolygroup), dim3(1), dim3(1), 0, 0, dev_a, dev_d, dev_dpre, dev_delay, dev_ppre,
dev_post, dev_pp, dev_pre, dev_s, dev_pngOutput, dev_anchors, dev_N,
dev_D, dev_T, dev_inputNeur, dev_inputTime);
hipMemcpy(pngOutput, dev_pngOutput, SIZE * sizeof(bool),
hipMemcpyDeviceToHost);
hipEventRecord(end, 0);
hipEventSynchronize(end);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, end);
std::cout << "/nYay! Your program's results are correct." << std::endl;
std::cout << "Your program took: " << elapsedTime << " ms." << std::endl;
// Cleanup in the event of success.
hipEventDestroy(start);
hipEventDestroy(end);
hipFree(dev_a);
hipFree(dev_d);
hipFree(dev_dpre);
hipFree(dev_delay);
hipFree(dev_ppre);
hipFree(dev_post);
hipFree(dev_pp);
hipFree(dev_pre);
hipFree(dev_s);
hipFree(dev_pngOutput);
hipFree(dev_anchors);
hipFree(dev_N);
hipFree(dev_D);
hipFree(dev_T);
hipFree(dev_inputTime);
hipFree(dev_inputNeur);
return 1;
}
|
effab4fde14c9848ec55ed44ded91374e0a3fd02.cu
|
/*
* earlyDetection.cu
*
* Created on: Sep 16, 2015
* Author: banafsheh
*/
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstring>
#include <vector>
#include "testPolygroup.h"
int earlyDetection(std::vector<double>& inputNeuronVec,
std::vector<double>& inputTimeVec, std::vector<std::string>& currentModel,
double* a, double* d, double dpre[][51], double delay[][23],
double ppre[][51], double post[][23], double pp[][23], double pre[][51],
double s[][23], bool* pngOutput, int anchors[][3], int N, int D,
int T) {
int SIZE=955;
//double* inputNueronArray = new double[inputNeuronVec.size()];
//int* inputTimeArray = new int[inputTimeVec.size()];
double* inputNeur = &inputNeuronVec[0];
double* inputTime = &inputTimeVec[0];
//double* inputNueronArray= new double [inputNeuronVec.size()];
int i = 0, j = 0, k = 0, i1 = 0;
/*for (i = 1; i < 8; i++) {
for (j = i + 1; j < 9; j++) {
for (k = j + 1; k < 10; k++) {
anchors[i1][0] = i;
anchors[i1][1] = j;
anchors[i1][2] = k;
i1++;
}
}
}*/
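// Device-side mirrors of the host arrays (226 rows, with 51-wide "pre" and 23-wide "post" connection tables)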
double* dev_a;
double* dev_d;
double* dev_dpre;
double* dev_delay;
double* dev_ppre;
double* dev_post;
double* dev_pp;
double* dev_pre;
double* dev_s;
bool* dev_pngOutput;
int* dev_anchors;
int* dev_N;
int* dev_D;
int* dev_T;
double* dev_inputTime;
double* dev_inputNeur;
cudaError_t err = cudaMalloc((void**) &dev_a, 226 * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_d, 226 * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_dpre, 226 * 51 * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_delay, 226 * 23 * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_ppre, 226 * 51 * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_post, 226 * 23 * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_pp, 226 * 23 * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_pre, 226 * 51 * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_s, 226 * 23 * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_pngOutput, SIZE * sizeof(bool));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_anchors, SIZE * 3 * sizeof(int));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_inputNeur, inputNeuronVec.size() * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_inputTime, inputTimeVec.size() * sizeof(double));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_N, 1 * sizeof(int));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_D, 1 * sizeof(int));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
err = cudaMalloc((void**) &dev_T, 1 * sizeof(int));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
//todo please complete this part
cudaMemcpy(dev_a, a, 226 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_d, d, 226 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_dpre, dpre, 226 * 51 * sizeof(double),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_delay, delay, 226 * 23 * sizeof(double),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_ppre, ppre, 226 * 51 * sizeof(double),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_post, post, 226 * 23 * sizeof(double),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_pp, pp, 226 * 23 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_pre, pre, 226 * 51 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_s, s, 226 * 23 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_pngOutput, pngOutput, SIZE * sizeof(bool),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_anchors, anchors, SIZE * 3 * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_N, &N, 1 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_D, &D, 1 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_T, &T, 1 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_inputTime, inputTime, inputTimeVec.size() * sizeof(double),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_inputNeur, inputNeur, inputNeuronVec.size() * sizeof(double),
cudaMemcpyHostToDevice);
//(SIZE + 255) / 256, 256
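// Note: launched with a single block and a single thread; the commented-out configuration above would cover all SIZE anchors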
testPolygroup<<<1, 1>>>(dev_a, dev_d, dev_dpre, dev_delay, dev_ppre,
dev_post, dev_pp, dev_pre, dev_s, dev_pngOutput, dev_anchors, dev_N,
dev_D, dev_T, dev_inputNeur, dev_inputTime);
cudaMemcpy(pngOutput, dev_pngOutput, SIZE * sizeof(bool),
cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, end);
std::cout << "/nYay! Your program's results are correct." << std::endl;
std::cout << "Your program took: " << elapsedTime << " ms." << std::endl;
// Cleanup in the event of success.
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaFree(dev_a);
cudaFree(dev_d);
cudaFree(dev_dpre);
cudaFree(dev_delay);
cudaFree(dev_ppre);
cudaFree(dev_post);
cudaFree(dev_pp);
cudaFree(dev_pre);
cudaFree(dev_s);
cudaFree(dev_pngOutput);
cudaFree(dev_anchors);
cudaFree(dev_N);
cudaFree(dev_D);
cudaFree(dev_T);
cudaFree(dev_inputTime);
cudaFree(dev_inputNeur);
return 1;
}
|
62907236fe47f34dd889f574d785b27c331de15e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <thrust/window_2d.h>
#include <thrust/block_2d.h>
#include <thrust/window_transform.h>
#define SRAND_VALUE 1985
#define BLOCK_SIZE 16
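// The ghost-row/ghost-column functors copy the opposite edge into the halo cells, giving periodic (toroidal) boundaries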
class ghostRowsFunctor
{
public:
__device__ void operator() (const thrust::window_2d<short> &inputWindow) const
{
inputWindow[inputWindow.block_dim_y-1][0]=inputWindow[make_int2(0,1)];
inputWindow[0][0]=inputWindow[make_int2(0,inputWindow.block_dim_y-2)];
}
};
class ghostColsFunctor
{
public:
__device__ void operator() (const thrust::window_2d<short> &inputWindow) const
{
inputWindow[0][inputWindow.block_dim_x-1]=inputWindow[make_int2(1,0)];
inputWindow[0][0]=inputWindow[make_int2(inputWindow.block_dim_x-2,0)];
}
};
class GOLFunctor
{
public:
__device__ void operator() (thrust::window_2d<short> &inputWindow, thrust::window_2d<short> &outputWindow) const
{
int numNeighbors;
// Get the number of neighbors for a given grid point
numNeighbors = inputWindow[make_int2(0,1)]+inputWindow[make_int2(2,1)]+
inputWindow[make_int2(1,0)]+inputWindow[make_int2(1,2)]+
inputWindow[make_int2(0,0)]+inputWindow[make_int2(2,2)]+
inputWindow[make_int2(0,2)]+inputWindow[make_int2(2,0)];
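// Conway's rules: alive next step iff exactly 3 neighbours, or currently alive with exactly 2 neighbours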
outputWindow[1][1]=(numNeighbors==3)||(inputWindow[make_int2(1,1)]&&(numNeighbors==2));
}
};
int main(int argc, char* argv[])
{
int i,j,iter;
int dim;
int maxIter = 1<<10; //Number of game steps
if (argc==2)
dim = atoi(argv[1]);
else if (argc==3)
{
dim = atoi(argv[1]);
maxIter = atoi(argv[2]);
}
else
dim = 1024;
thrust::block_2d<short> *d_grid = new thrust::block_2d<short>(dim+2,dim+2);
thrust::block_2d<short> *d_new_grid = new thrust::block_2d<short>(dim+2,dim+2);
thrust::block_2d<short> *d_temp_grid;
thrust::host_block_2d<short> h_grid(dim+2,dim+2);
// Assign initial population randomly
srand(SRAND_VALUE);
for(i = 1; i<=dim; i++) {
for(j = 1; j<=dim; j++) {
h_grid[i][j] = rand() % 2;
// printf("%d ",h_grid[i][j]);
}
// printf("\n");
}
// Copy over initial game grid (Dim-1 threads)
*d_grid = h_grid;
// Main game loop
for (iter = 0; iter<maxIter; iter++) {
thrust::window_vector<short> ghostRowsWindows (d_grid,1,d_grid->dim_y,1,d_grid->dim_y);
thrust::window_vector<short> ghostColsWindows (d_grid,d_grid->dim_x,1,d_grid->dim_x,1);
thrust::for_each(ghostRowsWindows.begin(),ghostRowsWindows.end(),ghostRowsFunctor());
thrust::for_each(ghostColsWindows.begin(),ghostColsWindows.end(),ghostColsFunctor());
thrust::window_vector<short>GOLInputVector(d_grid,3,3,1,1);
thrust::window_vector<short>GOLOutputVector(d_new_grid,3,3,1,1);
thrust::transform(thrust::hip::texture,GOLInputVector.begin(),GOLInputVector.end(),GOLOutputVector.begin(),GOLFunctor());
// ghostRows<<<cpyGridRowsGridSize, cpyBlockSize>>>(dim, d_grid);
// ghostCols<<<cpyGridColsGridSize, cpyBlockSize>>>(dim, d_grid);
// GOL<<<gridSize, blockSize>>>(dim, d_grid, d_newGrid);
// Swap our grids and iterate again
d_temp_grid = d_grid;
d_grid = d_new_grid;
d_new_grid = d_temp_grid;
}//iter loop
// Copy back results and sum
// hipMemcpy(h_grid, d_grid, bytes, hipMemcpyDeviceToHost);
// Sum up alive cells and print results
printf("\n\nOutput \n\n\n");
h_grid = *d_grid;
int total = 0;
for (i = 1; i<=dim; i++) {
for (j = 1; j<=dim; j++) {
// printf("%d ",h_grid[i][j]);
total += h_grid[i][j];
}
// printf("\n");
}
printf("Total Alive: %d\n", total);
// Release memory
return 0;
}
|
62907236fe47f34dd889f574d785b27c331de15e.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <thrust/window_2d.h>
#include <thrust/block_2d.h>
#include <thrust/window_transform.h>
#define SRAND_VALUE 1985
#define BLOCK_SIZE 16
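// The ghost-row/ghost-column functors copy the opposite edge into the halo cells, giving periodic (toroidal) boundaries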
class ghostRowsFunctor
{
public:
__device__ void operator() (const thrust::window_2d<short> &inputWindow) const
{
inputWindow[inputWindow.block_dim_y-1][0]=inputWindow[make_int2(0,1)];
inputWindow[0][0]=inputWindow[make_int2(0,inputWindow.block_dim_y-2)];
}
};
class ghostColsFunctor
{
public:
__device__ void operator() (const thrust::window_2d<short> &inputWindow) const
{
inputWindow[0][inputWindow.block_dim_x-1]=inputWindow[make_int2(1,0)];
inputWindow[0][0]=inputWindow[make_int2(inputWindow.block_dim_x-2,0)];
}
};
class GOLFunctor
{
public:
__device__ void operator() (thrust::window_2d<short> &inputWindow, thrust::window_2d<short> &outputWindow) const
{
int numNeighbors;
// Get the number of neighbors for a given grid point
numNeighbors = inputWindow[make_int2(0,1)]+inputWindow[make_int2(2,1)]+
inputWindow[make_int2(1,0)]+inputWindow[make_int2(1,2)]+
inputWindow[make_int2(0,0)]+inputWindow[make_int2(2,2)]+
inputWindow[make_int2(0,2)]+inputWindow[make_int2(2,0)];
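// Conway's rules: alive next step iff exactly 3 neighbours, or currently alive with exactly 2 neighbours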
outputWindow[1][1]=(numNeighbors==3)||(inputWindow[make_int2(1,1)]&&(numNeighbors==2));
}
};
int main(int argc, char* argv[])
{
int i,j,iter;
int dim;
int maxIter = 1<<10; //Number of game steps
if (argc==2)
dim = atoi(argv[1]);
else if (argc==3)
{
dim = atoi(argv[1]);
maxIter = atoi(argv[2]);
}
else
dim = 1024;
thrust::block_2d<short> *d_grid = new thrust::block_2d<short>(dim+2,dim+2);
thrust::block_2d<short> *d_new_grid = new thrust::block_2d<short>(dim+2,dim+2);
thrust::block_2d<short> *d_temp_grid;
thrust::host_block_2d<short> h_grid(dim+2,dim+2);
// Assign initial population randomly
srand(SRAND_VALUE);
for(i = 1; i<=dim; i++) {
for(j = 1; j<=dim; j++) {
h_grid[i][j] = rand() % 2;
// printf("%d ",h_grid[i][j]);
}
// printf("\n");
}
// Copy over initial game grid (Dim-1 threads)
*d_grid = h_grid;
// Main game loop
for (iter = 0; iter<maxIter; iter++) {
thrust::window_vector<short> ghostRowsWindows (d_grid,1,d_grid->dim_y,1,d_grid->dim_y);
thrust::window_vector<short> ghostColsWindows (d_grid,d_grid->dim_x,1,d_grid->dim_x,1);
thrust::for_each(ghostRowsWindows.begin(),ghostRowsWindows.end(),ghostRowsFunctor());
thrust::for_each(ghostColsWindows.begin(),ghostColsWindows.end(),ghostColsFunctor());
thrust::window_vector<short>GOLInputVector(d_grid,3,3,1,1);
thrust::window_vector<short>GOLOutputVector(d_new_grid,3,3,1,1);
thrust::transform(thrust::cuda::texture,GOLInputVector.begin(),GOLInputVector.end(),GOLOutputVector.begin(),GOLFunctor());
// ghostRows<<<cpyGridRowsGridSize, cpyBlockSize>>>(dim, d_grid);
// ghostCols<<<cpyGridColsGridSize, cpyBlockSize>>>(dim, d_grid);
// GOL<<<gridSize, blockSize>>>(dim, d_grid, d_newGrid);
// Swap our grids and iterate again
d_temp_grid = d_grid;
d_grid = d_new_grid;
d_new_grid = d_temp_grid;
}//iter loop
// Copy back results and sum
// cudaMemcpy(h_grid, d_grid, bytes, cudaMemcpyDeviceToHost);
// Sum up alive cells and print results
printf("\n\nOutput \n\n\n");
h_grid = *d_grid;
int total = 0;
for (i = 1; i<=dim; i++) {
for (j = 1; j<=dim; j++) {
// printf("%d ",h_grid[i][j]);
total += h_grid[i][j];
}
// printf("\n");
}
printf("Total Alive: %d\n", total);
// Release memory
return 0;
}
|
1204d0da7ffa8acad94da780a25a53b359b1db36.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
/*
* This file includes simple demonstrations of a variety of shuffle
* instructions.
*/
#define BDIMX 16
#define SEGM 4
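// BDIMX is the shuffle width (16-lane segments); SEGM is the number of values each thread keeps in registers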
void printData(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf("%2d ", in[i]);
}
printf("\n");
}
__global__ void test_shfl_broadcast(int *d_out, int *d_in, int const srcLane)
{
// int value = d_in[threadIdx.x];
int value = -2;
value = __shfl(value, srcLane, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_wrap (int *d_out, int *d_in, int const offset)
{
int value = d_in[threadIdx.x];
value = __shfl(value, threadIdx.x + offset, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_wrap_plus (int *d_out, int *d_in, int const offset)
{
int value = d_in[threadIdx.x];
value += __shfl(value, threadIdx.x + offset, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_up(int *d_out, int *d_in, unsigned int const delta)
{
int value = d_in[threadIdx.x];
value = __shfl_up (value, delta, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_down(int *d_out, int *d_in, unsigned int const delta)
{
int value = d_in[threadIdx.x];
value = __shfl_down (value, delta, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_xor(int *d_out, int *d_in, int const mask)
{
int value = d_in[threadIdx.x];
value = __shfl_xor (value, mask, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_xor_array(int *d_out, int *d_in, int const mask)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
value[0] = __shfl_xor (value[0], mask, BDIMX);
value[1] = __shfl_xor (value[1], mask, BDIMX);
value[2] = __shfl_xor (value[2], mask, BDIMX);
value[3] = __shfl_xor (value[3], mask, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_int4(int *d_out, int *d_in, int const mask)
{
int idx = threadIdx.x * SEGM;
int4 value;
value.x = d_in[idx];
value.y = d_in[idx + 1];
value.z = d_in[idx + 2];
value.w = d_in[idx + 3];
value.x = __shfl_xor (value.x, mask, BDIMX);
value.y = __shfl_xor (value.y, mask, BDIMX);
value.z = __shfl_xor (value.z, mask, BDIMX);
value.w = __shfl_xor (value.w, mask, BDIMX);
d_out[idx] = value.x;
d_out[idx + 1] = value.y;
d_out[idx + 2] = value.z;
d_out[idx + 3] = value.w;
}
__global__ void test_shfl_xor_element(int *d_out, int *d_in, int const mask,
int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
value[srcIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_array_swap (int *d_out, int *d_in, int const mask,
int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
bool pred = ((threadIdx.x & 1) != mask);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__inline__ __device__
void swap_old(int *value, int tid, int mask, int srcIdx, int dstIdx)
{
bool pred = ((tid / mask + 1) == 1);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
}
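// Butterfly exchange: the conditional local swaps around the __shfl_xor make each lane pair end up exchanging elements between the two array slots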
__inline__ __device__
void swap(int *value, int laneIdx, int mask, int firstIdx, int secondIdx)
{
bool pred = ((laneIdx / mask + 1) == 1);
if (pred)
{
int tmp = value[firstIdx];
value[firstIdx] = value[secondIdx];
value[secondIdx] = tmp;
}
value[secondIdx] = __shfl_xor (value[secondIdx], mask, BDIMX);
if (pred)
{
int tmp = value[firstIdx];
value[firstIdx] = value[secondIdx];
value[secondIdx] = tmp;
}
}
__global__ void test_shfl_swap_old (int *d_out, int *d_in, int const mask,
int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
swap(value, threadIdx.x, mask, srcIdx, dstIdx);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__
void test_shfl_swap (int *d_out, int *d_in, int const mask, int firstIdx,
int secondIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
swap(value, threadIdx.x, mask, firstIdx, secondIdx);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_array_swap_base (int *d_out, int *d_in,
int const mask, int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_array(int *d_out, int *d_in, int const offset)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
int lane = (offset + threadIdx.x) % SEGM;
value[0] = __shfl (value[3], lane, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_plus(int *d_out, int *d_in, int const mask)
{
int value = d_in[threadIdx.x];
value += __shfl_xor (value, mask, BDIMX);
d_out[threadIdx.x] = value;
}
int main(int argc, char **argv)
{
int dev = 0;
bool iPrintout = 1;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("> %s Starting.", argv[0]);
printf("at Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
int nElem = BDIMX;
int h_inData[BDIMX], h_outData[BDIMX];
// for (int i = 0; i < nElem; i++) h_inData[i] = i;
for (int i = 0; i < nElem; i++) h_inData[i] = i*8;
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
size_t nBytes = nElem * sizeof(int);
int *d_inData, *d_outData;
CHECK(hipMalloc((int**)&d_inData, nBytes));
CHECK(hipMalloc((int**)&d_outData, nBytes));
CHECK(hipMemcpy(d_inData, h_inData, nBytes, hipMemcpyHostToDevice));
int block = BDIMX;
// shfl bcast
hipLaunchKernelGGL(( test_shfl_broadcast), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl bcast\t\t: ");
printData(h_outData, nElem);
}
// shfl offset
hipLaunchKernelGGL(( test_shfl_wrap), dim3(1), dim3(block), 0, 0, d_outData, d_inData, -2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl wrap right\t\t: ");
printData(h_outData, nElem);
}
// shfl up
hipLaunchKernelGGL(( test_shfl_up), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl up \t\t: ");
printData(h_outData, nElem);
}
// shfl offset
hipLaunchKernelGGL(( test_shfl_wrap), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl wrap left\t\t: ");
printData(h_outData, nElem);
}
// shfl offset
hipLaunchKernelGGL(( test_shfl_wrap), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl wrap 2\t\t: ");
printData(h_outData, nElem);
}
// shfl down
hipLaunchKernelGGL(( test_shfl_down), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl down \t\t: ");
printData(h_outData, nElem);
}
// shfl xor
hipLaunchKernelGGL(( test_shfl_xor), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 1);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl xor 1\t\t: ");
printData(h_outData, nElem);
}
hipLaunchKernelGGL(( test_shfl_xor), dim3(1), dim3(block), 0, 0, d_outData, d_inData, -8);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl xor -1\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - int4
hipLaunchKernelGGL(( test_shfl_xor_int4), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl int4 1\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - register array
hipLaunchKernelGGL(( test_shfl_xor_array), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl array 1\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - test_shfl_xor_element
hipLaunchKernelGGL(( test_shfl_xor_element), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl idx \t\t: ");
printData(h_outData, nElem);
}
// shfl xor - swap
hipLaunchKernelGGL(( test_shfl_xor_array_swap_base), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1,
0, 3);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl swap base\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - swap
hipLaunchKernelGGL(( test_shfl_xor_array_swap), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl swap 0 3\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - swap
hipLaunchKernelGGL(( test_shfl_swap), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl swap inline\t: ");
printData(h_outData, nElem);
}
// shfl xor - register array
hipLaunchKernelGGL(( test_shfl_array), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1);
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl array \t\t: ");
printData(h_outData, nElem);
}
// finishing
CHECK(hipFree(d_inData));
CHECK(hipFree(d_outData));
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
|
1204d0da7ffa8acad94da780a25a53b359b1db36.cu
|
#include "../common/common.h"
#include <stdio.h>
#include <cuda_runtime.h>
/*
* This file includes simple demonstrations of a variety of shuffle
* instructions.
*/
#define BDIMX 16
#define SEGM 4
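// BDIMX is the shuffle width (16-lane segments); SEGM is the number of values each thread keeps in registers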
void printData(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf("%2d ", in[i]);
}
printf("\n");
}
__global__ void test_shfl_broadcast(int *d_out, int *d_in, int const srcLane)
{
// int value = d_in[threadIdx.x];
int value = -2;
value = __shfl(value, srcLane, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_wrap (int *d_out, int *d_in, int const offset)
{
int value = d_in[threadIdx.x];
value = __shfl(value, threadIdx.x + offset, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_wrap_plus (int *d_out, int *d_in, int const offset)
{
int value = d_in[threadIdx.x];
value += __shfl(value, threadIdx.x + offset, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_up(int *d_out, int *d_in, unsigned int const delta)
{
int value = d_in[threadIdx.x];
value = __shfl_up (value, delta, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_down(int *d_out, int *d_in, unsigned int const delta)
{
int value = d_in[threadIdx.x];
value = __shfl_down (value, delta, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_xor(int *d_out, int *d_in, int const mask)
{
int value = d_in[threadIdx.x];
value = __shfl_xor (value, mask, BDIMX);
d_out[threadIdx.x] = value;
}
__global__ void test_shfl_xor_array(int *d_out, int *d_in, int const mask)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
value[0] = __shfl_xor (value[0], mask, BDIMX);
value[1] = __shfl_xor (value[1], mask, BDIMX);
value[2] = __shfl_xor (value[2], mask, BDIMX);
value[3] = __shfl_xor (value[3], mask, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_int4(int *d_out, int *d_in, int const mask)
{
int idx = threadIdx.x * SEGM;
int4 value;
value.x = d_in[idx];
value.y = d_in[idx + 1];
value.z = d_in[idx + 2];
value.w = d_in[idx + 3];
value.x = __shfl_xor (value.x, mask, BDIMX);
value.y = __shfl_xor (value.y, mask, BDIMX);
value.z = __shfl_xor (value.z, mask, BDIMX);
value.w = __shfl_xor (value.w, mask, BDIMX);
d_out[idx] = value.x;
d_out[idx + 1] = value.y;
d_out[idx + 2] = value.z;
d_out[idx + 3] = value.w;
}
__global__ void test_shfl_xor_element(int *d_out, int *d_in, int const mask,
int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
value[srcIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_array_swap (int *d_out, int *d_in, int const mask,
int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
bool pred = ((threadIdx.x & 1) != mask);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__inline__ __device__
void swap_old(int *value, int tid, int mask, int srcIdx, int dstIdx)
{
bool pred = ((tid / mask + 1) == 1);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
if (pred)
{
int tmp = value[srcIdx];
value[srcIdx] = value[dstIdx];
value[dstIdx] = tmp;
}
}
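// Butterfly exchange: the conditional local swaps around the __shfl_xor make each lane pair end up exchanging elements between the two array slots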
__inline__ __device__
void swap(int *value, int laneIdx, int mask, int firstIdx, int secondIdx)
{
bool pred = ((laneIdx / mask + 1) == 1);
if (pred)
{
int tmp = value[firstIdx];
value[firstIdx] = value[secondIdx];
value[secondIdx] = tmp;
}
value[secondIdx] = __shfl_xor (value[secondIdx], mask, BDIMX);
if (pred)
{
int tmp = value[firstIdx];
value[firstIdx] = value[secondIdx];
value[secondIdx] = tmp;
}
}
__global__ void test_shfl_swap_old (int *d_out, int *d_in, int const mask,
int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
swap(value, threadIdx.x, mask, srcIdx, dstIdx);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__
void test_shfl_swap (int *d_out, int *d_in, int const mask, int firstIdx,
int secondIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
swap(value, threadIdx.x, mask, firstIdx, secondIdx);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_array_swap_base (int *d_out, int *d_in,
int const mask, int srcIdx, int dstIdx)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_array(int *d_out, int *d_in, int const offset)
{
int idx = threadIdx.x * SEGM;
int value[SEGM];
for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i];
int lane = (offset + threadIdx.x) % SEGM;
value[0] = __shfl (value[3], lane, BDIMX);
for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i];
}
__global__ void test_shfl_xor_plus(int *d_out, int *d_in, int const mask)
{
int value = d_in[threadIdx.x];
value += __shfl_xor (value, mask, BDIMX);
d_out[threadIdx.x] = value;
}
int main(int argc, char **argv)
{
int dev = 0;
bool iPrintout = 1;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("> %s Starting.", argv[0]);
printf("at Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
int nElem = BDIMX;
int h_inData[BDIMX], h_outData[BDIMX];
// for (int i = 0; i < nElem; i++) h_inData[i] = i;
for (int i = 0; i < nElem; i++) h_inData[i] = i*8;
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
size_t nBytes = nElem * sizeof(int);
int *d_inData, *d_outData;
CHECK(cudaMalloc((int**)&d_inData, nBytes));
CHECK(cudaMalloc((int**)&d_outData, nBytes));
CHECK(cudaMemcpy(d_inData, h_inData, nBytes, cudaMemcpyHostToDevice));
int block = BDIMX;
// shfl bcast
test_shfl_broadcast<<<1, block>>>(d_outData, d_inData, 2);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl bcast\t\t: ");
printData(h_outData, nElem);
}
// shfl offset
test_shfl_wrap<<<1, block>>>(d_outData, d_inData, -2);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl wrap right\t\t: ");
printData(h_outData, nElem);
}
// shfl up
test_shfl_up<<<1, block>>>(d_outData, d_inData, 2);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl up \t\t: ");
printData(h_outData, nElem);
}
// shfl offset
test_shfl_wrap<<<1, block>>>(d_outData, d_inData, 2);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl wrap left\t\t: ");
printData(h_outData, nElem);
}
// shfl offset
test_shfl_wrap<<<1, block>>>(d_outData, d_inData, 2);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl wrap 2\t\t: ");
printData(h_outData, nElem);
}
// shfl down
test_shfl_down<<<1, block>>>(d_outData, d_inData, 2);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl down \t\t: ");
printData(h_outData, nElem);
}
// shfl xor
test_shfl_xor<<<1, block>>>(d_outData, d_inData, 1);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl xor 1\t\t: ");
printData(h_outData, nElem);
}
test_shfl_xor<<<1, block>>>(d_outData, d_inData, -8);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl xor -1\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - int4
test_shfl_xor_int4<<<1, block / SEGM>>>(d_outData, d_inData, 1);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl int4 1\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - register array
test_shfl_xor_array<<<1, block / SEGM>>>(d_outData, d_inData, 1);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl array 1\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - test_shfl_xor_element
test_shfl_xor_element<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl idx \t\t: ");
printData(h_outData, nElem);
}
// shfl xor - swap
test_shfl_xor_array_swap_base<<<1, block / SEGM>>>(d_outData, d_inData, 1,
0, 3);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl swap base\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - swap
test_shfl_xor_array_swap<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl swap 0 3\t\t: ");
printData(h_outData, nElem);
}
// shfl xor - swap
test_shfl_swap<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("shfl swap inline\t: ");
printData(h_outData, nElem);
}
// shfl xor - register array
test_shfl_array<<<1, block / SEGM>>>(d_outData, d_inData, 1);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost));
if(iPrintout)
{
printf("initialData\t\t: ");
printData(h_inData, nElem);
}
if(iPrintout)
{
printf("shfl array \t\t: ");
printData(h_outData, nElem);
}
// finishing
CHECK(cudaFree(d_inData));
CHECK(cudaFree(d_outData));
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
fe50b31031fb0daca5f849b6d50650a423b70ee5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by raver on 4/9/2018.
//
#include <Environment.h>
#include "../indexreduce.h"
#include <op_boilerplate.h>
#include "../legacy_ops.h"
template <typename T>
static __device__ void indexReduceGeneric(
const int op,
T *dx,
int *xShapeInfo, int xRank,
T *extraParams,
T *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::indexreduce::IndexReduce<T>), sizeof(shape::TAD), xRank);
}
__syncthreads();
functions::indexreduce::IndexReduce<T>::transform(
op,
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
postProcessOrNot,
allocationBuffer,
reductionBuffer,
manager,
tadOnlyShapeInfo,
tadOffsets);
}
__global__ void indexReduceDouble(
int op,
double *dx,
int *xShapeInfo, int xRank,
double *extraParams,
double *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceGeneric<double>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
__global__ void indexReduceFloat(
int op,
float *dx,
int *xShapeInfo, int xRank,
float *extraParams,
float *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceGeneric<float>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
__global__ void indexReduceHalf(
int op,
float16 *dx,
int *xShapeInfo, int xRank,
float16 *extraParams,
float16 *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceGeneric<float16>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
namespace functions {
namespace indexreduce {
template <>
_CUDA_H void IndexReduce<float>::executeIndexReduceScalar(dim3 launchDims, hipStream_t *stream, const int opNum, float *dx, int *xShapeInfo, int xRank, float *extraParams, float *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
nullptr, 0,
nullptr,
1,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
}
template <>
_CUDA_H void IndexReduce<double>::executeIndexReduceScalar(dim3 launchDims, hipStream_t *stream, const int opNum, double *dx, int *xShapeInfo, int xRank, double *extraParams, double *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
nullptr, 0,
nullptr,
1,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
}
template <>
_CUDA_H void IndexReduce<float16>::executeIndexReduceScalar(dim3 launchDims, hipStream_t *stream, const int opNum, float16 *dx, int *xShapeInfo, int xRank, float16 *extraParams, float16 *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
nullptr, 0,
nullptr,
1,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
checkCudaErrors(hipStreamSynchronize(*stream));
}
template <>
_CUDA_H void IndexReduce<float>::executeIndexReduce(dim3 launchDims, hipStream_t *stream, const int opNum, float *dx, int *xShapeInfo, int xRank, float *extraParams, float *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
template <>
_CUDA_H void IndexReduce<double>::executeIndexReduce(dim3 launchDims, hipStream_t *stream, const int opNum, double *dx, int *xShapeInfo, int xRank, double *extraParams, double *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
template <>
_CUDA_H void IndexReduce<float16>::executeIndexReduce(dim3 launchDims, hipStream_t *stream, const int opNum, float16 *dx, int *xShapeInfo, int xRank, float16 *extraParams, float16 *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream,
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(hipStreamSynchronize(*stream));
}
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
template<typename T>
struct SharedIndexValue {
// Ensure that we won't compile any un-specialized types
__device__ T * getPointer() {
extern __device__ void error(void);
error();
return 0;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<float> {
__device__ IndexValue<float> * getPointer() {
extern __shared__ IndexValue<float> s_int2[];
return s_int2;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<double> {
__device__ IndexValue<double> * getPointer() {
extern __shared__ IndexValue<double> s_int6[];
return s_int6;
}
};
template <typename T>
template <typename OpType>
__device__ void IndexReduce<T>::aggregatePartials(IndexValue<T> **sPartialsRef,int tid,int numElements,T *extraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
IndexValue<T> *sPartials = *sPartialsRef;
int floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while ( floorPow2 & (floorPow2 - 1) ) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
IndexValue<T> prev = sPartials[tid - floorPow2];
IndexValue<T> curr = sPartials[tid];
sPartials[tid - floorPow2] = OpType::update(prev,curr,extraParams);
}
__syncthreads();
}
for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
IndexValue<T> curr = sPartials[tid];
IndexValue<T> next = sPartials[tid + activeThreads];
sPartials[tid] = OpType::update(curr,next,extraParams);
}
__syncthreads();
}
}
template <typename T>
__device__ void IndexReduce<T>::transform(
const int opNum,
T *x,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationBuffer,
T *reductionBuffer,
UnifiedSharedMemory *manager,
int *tadShapeInfo,
Nd4jIndex *tadOffset) {
DISPATCH_BY_OPNUM(transform, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, manager, tadShapeInfo, tadOffset), INDEX_REDUCE_OPS);
}
template <typename T>
template <typename OpType>
__device__ void IndexReduce<T>::transform(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationBuffer,
T *reductionBuffer,
UnifiedSharedMemory *manager,
int *tadOnlyShapeInfo,
Nd4jIndex *tadOffsets){
/**
* Gpu information for the problem
*/
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
//shared memory space for storing intermediate results
IndexValue<T> *sPartials;
sPartials = (IndexValue<T> *)manager->getSharedReductionBuffer(); //holder.getPointer();
// T startingVal = OpType::startingValue(dx);
// IndexValue <T> val = {startingVal, threadIdx.x};
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
//length for the tad
__shared__ volatile Nd4jIndex xLength;
__shared__ volatile Nd4jIndex resultLength;
//only compute the tad indexes once
IndexValue <T> reduction = OpType::startingIndexValue(dx);
if (threadIdx.x == 0) {
if (resultShapeInfo != nullptr)
resultLength = shape::length(resultShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
// xElementWiseStride = shape::elementWiseStride(xShapeInfo);
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ Nd4jIndex tadLength;
__shared__ int tadEWS;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ int *tadShape;
__shared__ int *tadStride;
__shared__ char tadOrder;
if (threadIdx.x == 0) {
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
tadOrder = shape::order(tadOnlyShapeInfo);
}
__syncthreads();
if (dimensionLength > 1 || tadEWS < 1) {
int xCoord[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jIndex tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for(int i = threadIdx.x;i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank,tadShape, i, xCoord);
Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
IndexValue<T> comp {dx[xOffset], i};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[r] = (T) sPartials[threadIdx.x].index;
}
}
} else {
for(int i = blockIdx.x; i < numTads; i+= gridDim.x) {
Nd4jIndex tadOffsetForBlock = tadOffsets[i];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (int x = threadIdx.x; x < tadLength; x+= blockDim.x) {
IndexValue<T> comp {dx[tadOffsetForBlock + x * tadEWS], x};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = (T) sPartials[threadIdx.x].index; //postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
}
//reduce to 1 result
else if (resultScalar) {
Nd4jIndex n = shape::length(xShapeInfo);
int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
if(xElementWiseStride >= 1) {
for(Nd4jIndex i = tid;i < n; i += (blockDim.x * gridDim.x)) {
IndexValue <T> indexVal = {dx[i * xElementWiseStride], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
} else {
int rank = shape::rank(xShapeInfo);
int ind2sub[MAX_RANK];
for(Nd4jIndex i = tid;i < n; i += blockDim.x * gridDim.x) {
shape::ind2subC(rank,shape::shapeOf(xShapeInfo),i,ind2sub);
Nd4jIndex offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),ind2sub,rank);
IndexValue <T> indexVal = {dx[offset], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, (int) n),extraParams);
__syncthreads();
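// Cross-block combine (grid-wide reduction): each block publishes its partial
// result into reductionBuffer, then an atomicInc ticket on tc[16384] elects the
// last block to finish; only that block re-reduces the per-block partials and
// writes the winning index to result[0].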
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int *tc = (unsigned int *) reductionBuffer;
int rank = shape::rank(xShapeInfo);
tid = threadIdx.x;
if (threadIdx.x == 0) {
IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer;
pBuffer[blockIdx.x] = {sPartials[0].value, sPartials[0].index};
}
__threadfence();
__syncthreads();
if (tid==0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x-1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer;
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (Nd4jIndex i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x),extraParams);
__syncthreads();
if (tid == 0) {
result[0] = (T) sPartials[0].index;
}
}
} else {
if (tid == 0) {
unsigned int *tc = (unsigned *) reductionBuffer;
tc[16384] = 0;
result[0] = (T) sPartials[0].index;
}
}
}
}
}
}
|
fe50b31031fb0daca5f849b6d50650a423b70ee5.cu
|
//
// Created by raver on 4/9/2018.
//
#include <Environment.h>
#include "../indexreduce.h"
#include <op_boilerplate.h>
#include "../legacy_ops.h"
template <typename T>
static __device__ void indexReduceGeneric(
const int op,
T *dx,
int *xShapeInfo, int xRank,
T *extraParams,
T *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, T *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::indexreduce::IndexReduce<T>), sizeof(shape::TAD), xRank);
}
__syncthreads();
functions::indexreduce::IndexReduce<T>::transform(
op,
dx,
xShapeInfo,
extraParams,
result,
resultShapeInfo,
dimension,
dimensionLength,
postProcessOrNot,
allocationBuffer,
reductionBuffer,
manager,
tadOnlyShapeInfo,
tadOffsets);
}
__global__ void indexReduceDouble(
int op,
double *dx,
int *xShapeInfo, int xRank,
double *extraParams,
double *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceGeneric<double>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
__global__ void indexReduceFloat(
int op,
float *dx,
int *xShapeInfo, int xRank,
float *extraParams,
float *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceGeneric<float>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
__global__ void indexReduceHalf(
int op,
float16 *dx,
int *xShapeInfo, int xRank,
float16 *extraParams,
float16 *result,
int *resultShapeInfo, int zRank,
int *dimension,
int dimensionLength,
int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceGeneric<float16>(
op,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
}
namespace functions {
namespace indexreduce {
template <>
_CUDA_H void IndexReduce<float>::executeIndexReduceScalar(dim3 launchDims, cudaStream_t *stream, const int opNum, float *dx, int *xShapeInfo, int xRank, float *extraParams, float *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
nullptr, 0,
nullptr,
1,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
template <>
_CUDA_H void IndexReduce<double>::executeIndexReduceScalar(dim3 launchDims, cudaStream_t *stream, const int opNum, double *dx, int *xShapeInfo, int xRank, double *extraParams, double *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
nullptr, 0,
nullptr,
1,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
template <>
_CUDA_H void IndexReduce<float16>::executeIndexReduceScalar(dim3 launchDims, cudaStream_t *stream, const int opNum, float16 *dx, int *xShapeInfo, int xRank, float16 *extraParams, float16 *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
nullptr, 0,
nullptr,
1,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
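// The three "scalar" launchers above pass null dimension/result-shape arguments
// and always synchronize on the stream; the executeIndexReduce variants below
// forward the real dimension data and only synchronize when debug mode is
// enabled through nd4j::Environment.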
template <>
_CUDA_H void IndexReduce<float>::executeIndexReduce(dim3 launchDims, cudaStream_t *stream, const int opNum, float *dx, int *xShapeInfo, int xRank, float *extraParams, float *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
template <>
_CUDA_H void IndexReduce<double>::executeIndexReduce(dim3 launchDims, cudaStream_t *stream, const int opNum, double *dx, int *xShapeInfo, int xRank, double *extraParams, double *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, double *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
template <>
_CUDA_H void IndexReduce<float16>::executeIndexReduce(dim3 launchDims, cudaStream_t *stream, const int opNum, float16 *dx, int *xShapeInfo, int xRank, float16 *extraParams, float16 *result, int *resultShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, float16 *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) {
indexReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>(
opNum,
dx,
xShapeInfo, xRank,
extraParams,
result,
resultShapeInfo, zRank,
dimension,
dimensionLength,
1, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets);
if (nd4j::Environment::getInstance()->isDebug())
checkCudaErrors(cudaStreamSynchronize(*stream));
}
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
template<typename T>
struct SharedIndexValue {
// Ensure that we won't compile any un-specialized types
__device__ T * getPointer() {
extern __device__ void error(void);
error();
return 0;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<float> {
__device__ IndexValue<float> * getPointer() {
extern __shared__ IndexValue<float> s_int2[];
return s_int2;
}
};
// Following are the specializations for the following types.
// int, uint, char, uchar, short, ushort, long long, ulong long, bool, float, and double
// One could also specialize it for user-defined types.
template<>
struct SharedIndexValue<double> {
__device__ IndexValue<double> * getPointer() {
extern __shared__ IndexValue<double> s_int6[];
return s_int6;
}
};
template <typename T>
template <typename OpType>
__device__ void IndexReduce<T>::aggregatePartials(IndexValue<T> **sPartialsRef,int tid,int numElements,T *extraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
IndexValue<T> *sPartials = *sPartialsRef;
int floorPow2 = blockDim.x;
if (floorPow2 & (floorPow2 - 1)) {
while ( floorPow2 & (floorPow2 - 1) ) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
IndexValue<T> prev = sPartials[tid - floorPow2];
IndexValue<T> curr = sPartials[tid];
sPartials[tid - floorPow2] = OpType::update(prev,curr,extraParams);
}
__syncthreads();
}
for (int activeThreads = floorPow2 >> 1;activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numElements) {
IndexValue<T> curr = sPartials[tid];
IndexValue<T> next = sPartials[tid + activeThreads];
sPartials[tid] = OpType::update(curr,next,extraParams);
}
__syncthreads();
}
}
template <typename T>
__device__ void IndexReduce<T>::transform(
const int opNum,
T *x,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationBuffer,
T *reductionBuffer,
UnifiedSharedMemory *manager,
int *tadShapeInfo,
Nd4jIndex *tadOffset) {
DISPATCH_BY_OPNUM(transform, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, manager, tadShapeInfo, tadOffset), INDEX_REDUCE_OPS);
}
template <typename T>
template <typename OpType>
__device__ void IndexReduce<T>::transform(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
int postProcessOrNot,
int *allocationBuffer,
T *reductionBuffer,
UnifiedSharedMemory *manager,
int *tadOnlyShapeInfo,
Nd4jIndex *tadOffsets){
/**
* Gpu information for the problem
*/
int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ volatile int resultScalar;
//shared memory space for storing intermediate results
IndexValue<T> *sPartials;
sPartials = (IndexValue<T> *)manager->getSharedReductionBuffer(); //holder.getPointer();
// T startingVal = OpType::startingValue(dx);
// IndexValue <T> val = {startingVal, threadIdx.x};
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
//length for the tad
__shared__ volatile Nd4jIndex xLength;
__shared__ volatile Nd4jIndex resultLength;
//only compute the tad indexes once
IndexValue <T> reduction = OpType::startingIndexValue(dx);
if (threadIdx.x == 0) {
if (resultShapeInfo != nullptr)
resultLength = shape::length(resultShapeInfo);
else resultLength = 1;
if (dimensionLength == 1) {
if (dimension == nullptr || dimension[0] == MAX_DIMENSION)
resultScalar = 1;
else
resultScalar = 0;
}
else
resultScalar = 0;
if (resultLength == 1)
resultScalar = 1;
// xElementWiseStride = shape::elementWiseStride(xShapeInfo);
xLength = shape::length(xShapeInfo);
}
__syncthreads();
if (!resultScalar) {
__shared__ Nd4jIndex tadLength;
__shared__ int tadEWS;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ int *tadShape;
__shared__ int *tadStride;
__shared__ char tadOrder;
if (threadIdx.x == 0) {
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
tadOrder = shape::order(tadOnlyShapeInfo);
}
__syncthreads();
if (dimensionLength > 1 || tadEWS < 1) {
int xCoord[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jIndex tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for(int i = threadIdx.x;i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank,tadShape, i, xCoord);
Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
IndexValue<T> comp {dx[xOffset], i};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[r] = (T) sPartials[threadIdx.x].index;
}
}
} else {
for(int i = blockIdx.x; i < numTads; i+= gridDim.x) {
Nd4jIndex tadOffsetForBlock = tadOffsets[i];
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (int x = threadIdx.x; x < tadLength; x+= blockDim.x) {
IndexValue<T> comp {dx[tadOffsetForBlock + x * tadEWS], x};
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], comp, extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength),extraParams);
__syncthreads();
if (threadIdx.x == 0) {
result[i] = (T) sPartials[threadIdx.x].index; //postProcess(sPartials[0],tadLength ,extraParams);
}
}
}
}
//reduce to 1 result
else if (resultScalar) {
Nd4jIndex n = shape::length(xShapeInfo);
int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
if(xElementWiseStride >= 1) {
for(Nd4jIndex i = tid;i < n; i += (blockDim.x * gridDim.x)) {
IndexValue <T> indexVal = {dx[i * xElementWiseStride], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
} else {
int rank = shape::rank(xShapeInfo);
int ind2sub[MAX_RANK];
for(Nd4jIndex i = tid;i < n; i += blockDim.x * gridDim.x) {
shape::ind2subC(rank,shape::shapeOf(xShapeInfo),i,ind2sub);
Nd4jIndex offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),ind2sub,rank);
IndexValue <T> indexVal = {dx[offset], i};
reduction = OpType::update(reduction, indexVal, extraParams);
}
}
sPartials[threadIdx.x] = reduction;
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, (int) n),extraParams);
__syncthreads();
if (gridDim.x > 1) {
__shared__ bool amLast;
unsigned int *tc = (unsigned int *) reductionBuffer;
int rank = shape::rank(xShapeInfo);
tid = threadIdx.x;
if (threadIdx.x == 0) {
IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer;
pBuffer[blockIdx.x] = {sPartials[0].value, sPartials[0].index};
}
__threadfence();
__syncthreads();
if (tid==0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x-1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
IndexValue<T> *pBuffer = (IndexValue<T> *) reductionBuffer;
sPartials[threadIdx.x] = OpType::startingIndexValue(dx);
for (Nd4jIndex i = threadIdx.x; i < gridDim.x; i += blockDim.x) {
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], pBuffer[i], extraParams);
}
__syncthreads();
aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x),extraParams);
__syncthreads();
if (tid == 0) {
result[0] = (T) sPartials[0].index;
}
}
} else {
if (tid == 0) {
unsigned int *tc = (unsigned *) reductionBuffer;
tc[16384] = 0;
result[0] = (T) sPartials[0].index;
}
}
}
}
}
}
|
ab8fdb8900a0b351abdcd81b8f3f0808707a09bc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CS3700 Example matrix multiplication using GPU
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define TILE_WIDTH 2
#define WIDTH 6
// Kernel function executed by the device (GPU)
__global__ void
product (float *d_a, float *d_b, float *d_c, const int n) {
int col = blockIdx.x * blockDim.x + threadIdx.x ;
int row = blockIdx.y * blockDim.y + threadIdx.y ;
float sum = 0;
if (row < n && col < n) {
for (int i = 0 ; i<n ; ++i) {
sum += d_a[row * n + i ] * d_b[i * n + col] ;
}
d_c[row * n + col] = sum;
}
}
__global__ void
sum (float *d_a, float *d_b, float *d_d, const int n) {
    // One thread per output element; the bounds check discards the extra
    // threads launched beyond the n x n matrix.
    int col = blockIdx.x * blockDim.x + threadIdx.x ;
    int row = blockIdx.y * blockDim.y + threadIdx.y ;
    if (row < n && col < n) {
        d_d[row * n + col] = d_a[row * n + col] + d_b[row * n + col];
    }
}
// Utility function to print the input matrix
void printMatrix (float m[][WIDTH]) {
int i, j;
for (i = 0; i<WIDTH; ++i) {
for (j = 0; j< WIDTH; ++j) {
printf ("%d\t", (int)m[i][j]);
}
printf ("\n");
}
}
// Main function executed by the host (CPU)
int main () {
// host matrices
float host_a[WIDTH][WIDTH],
host_b[WIDTH][WIDTH],
host_c[WIDTH][WIDTH],
host_d[WIDTH][WIDTH];
// device arrays
float *device_a, *device_b, *device_c, *device_d;
int i, j;
// initialize host matrices using random numbers
time_t t;
srand ((unsigned) time(&t));
for (i = 0; i<WIDTH; ++i) {
for (j = 0; j<WIDTH; j++) {
host_a[i][j] = (float) (rand() % 50);
host_b[i][j] = (float) (rand() % 50);
}
}
printf ("Matrix A:\n");
printMatrix (host_a);
printf ("\n");
printf ("Matrix B:\n");
printMatrix (host_b);
printf ("\n");
// allocate device memory for input matrices
size_t deviceSize = WIDTH * WIDTH * sizeof (float);
hipMalloc ((void **) &device_a, deviceSize);
hipMalloc ((void **) &device_b, deviceSize);
// copy host matrices to device
hipMemcpy (device_a, host_a, deviceSize, hipMemcpyHostToDevice );
hipMemcpy (device_b, host_b, deviceSize, hipMemcpyHostToDevice );
// allocate device memory to store computed result
hipMalloc((void **) &device_c, deviceSize);
hipMalloc((void **) &device_d, deviceSize);
dim3 dimBlock (WIDTH, WIDTH);
dim3 dimGrid (WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH);
hipLaunchKernelGGL(( product), dim3(dimGrid), dim3(dimBlock), 0, 0, device_a, device_b, device_c, WIDTH);
hipLaunchKernelGGL(( sum), dim3(dimGrid), dim3(dimBlock), 0, 0, device_a, device_b, device_d, WIDTH);
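// Note: with WIDTH = 6 and TILE_WIDTH = 2 the launches use a 3x3 grid of 6x6
// blocks, so more threads start than there are matrix elements and the
// in-kernel bounds checks discard the extras. A minimal, purely illustrative
// launch-error check (not part of the original program) would be:
//     hipError_t err = hipGetLastError();
//     if (err != hipSuccess) printf("kernel launch failed: %s\n", hipGetErrorString(err));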
// copy result from device back to host
hipMemcpy (host_c, device_c, deviceSize, hipMemcpyDeviceToHost);
hipMemcpy (host_d, device_d, deviceSize, hipMemcpyDeviceToHost);
// output the computed result matrix
printf ("A x B: \n");
printMatrix (host_c);
printf ("A + B: \n");
printMatrix (host_d);
hipFree (device_a);
hipFree (device_b);
hipFree (device_c);
hipFree (device_d);
return 0;
}
|
ab8fdb8900a0b351abdcd81b8f3f0808707a09bc.cu
|
// CS3700 Example matrix multiplication using GPU
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define TILE_WIDTH 2
#define WIDTH 6
// Kernel function executed by the device (GPU)
__global__ void
product (float *d_a, float *d_b, float *d_c, const int n) {
int col = blockIdx.x * blockDim.x + threadIdx.x ;
int row = blockIdx.y * blockDim.y + threadIdx.y ;
float sum = 0;
if (row < n && col < n) {
for (int i = 0 ; i<n ; ++i) {
sum += d_a[row * n + i ] * d_b[i * n + col] ;
}
d_c[row * n + col] = sum;
}
}
__global__ void
sum (float *d_a, float *d_b, float *d_d, const int n) {
    // One thread per output element; the bounds check discards the extra
    // threads launched beyond the n x n matrix.
    int col = blockIdx.x * blockDim.x + threadIdx.x ;
    int row = blockIdx.y * blockDim.y + threadIdx.y ;
    if (row < n && col < n) {
        d_d[row * n + col] = d_a[row * n + col] + d_b[row * n + col];
    }
}
// Utility function to print the input matrix
void printMatrix (float m[][WIDTH]) {
int i, j;
for (i = 0; i<WIDTH; ++i) {
for (j = 0; j< WIDTH; ++j) {
printf ("%d\t", (int)m[i][j]);
}
printf ("\n");
}
}
// Main function executed by the host (CPU)
int main () {
// host matrices
float host_a[WIDTH][WIDTH],
host_b[WIDTH][WIDTH],
host_c[WIDTH][WIDTH],
host_d[WIDTH][WIDTH];
// device arrays
float *device_a, *device_b, *device_c, *device_d;
int i, j;
// initialize host matrices using random numbers
time_t t;
srand ((unsigned) time(&t));
for (i = 0; i<WIDTH; ++i) {
for (j = 0; j<WIDTH; j++) {
host_a[i][j] = (float) (rand() % 50);
host_b[i][j] = (float) (rand() % 50);
}
}
printf ("Matrix A:\n");
printMatrix (host_a);
printf ("\n");
printf ("Matrix B:\n");
printMatrix (host_b);
printf ("\n");
// allocate device memory for input matrices
size_t deviceSize = WIDTH * WIDTH * sizeof (float);
cudaMalloc ((void **) &device_a, deviceSize);
cudaMalloc ((void **) &device_b, deviceSize);
// copy host matrices to device
cudaMemcpy (device_a, host_a, deviceSize, cudaMemcpyHostToDevice );
cudaMemcpy (device_b, host_b, deviceSize, cudaMemcpyHostToDevice );
// allocate device memory to store computed result
cudaMalloc((void **) &device_c, deviceSize);
cudaMalloc((void **) &device_d, deviceSize);
dim3 dimBlock (WIDTH, WIDTH);
dim3 dimGrid (WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH);
product<<<dimGrid, dimBlock>>> (device_a, device_b, device_c, WIDTH);
sum<<<dimGrid, dimBlock>>> (device_a, device_b, device_d, WIDTH);
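// Note: the blocking cudaMemcpy calls below synchronize with the kernels on the
// default stream, so results are complete before printing. For explicit
// launch-error reporting, one illustrative sketch is:
//     cudaError_t err = cudaGetLastError();
//     if (err != cudaSuccess) printf("kernel launch failed: %s\n", cudaGetErrorString(err));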
// copy result from device back to host
cudaMemcpy (host_c, device_c, deviceSize, cudaMemcpyDeviceToHost);
cudaMemcpy (host_d, device_d, deviceSize, cudaMemcpyDeviceToHost);
// output the computed result matrix
printf ("A x B: \n");
printMatrix (host_c);
printf ("A + B: \n");
printMatrix (host_d);
cudaFree (device_a);
cudaFree (device_b);
cudaFree (device_c);
cudaFree (device_d);
return 0;
}
|
27cd54ba71d88a96e02c702da57c58c6cc1fd9df.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Cuda/ContinuousCollision.cuh>
#include <Cuda/InsertionSort.cuh>
#include <Cuda/Scan.cuh>
#include <Cuda/Reduce.cuh>
namespace {
__device__
CollisionAndTime minCollisionAndTime(CollisionAndTime a, CollisionAndTime b) {
if (a.seconds < b.seconds) {
return a;
} else {
return b;
}
}
__device__
float agentMaxX(Agent& agent, float seconds) {
float maxX = agent.position.x;
if (agent.velocity.x > 0) {
maxX += agent.velocity.x * seconds;
}
return maxX + agent.radius;
}
__device__
float agentMinX(Agent& agent, float seconds) {
float minX = agent.position.x;
if (agent.velocity.x < 0) {
minX += agent.velocity.x * seconds;
}
return minX - agent.radius;
}
__global__
void mapAgentsToMaxXAndIndex(Agent* agents, MaxXAndIndex* maxXAndIndexes, int size, float seconds) {
int tid = threadIdx.x;
int globalOffset = blockDim.x * blockIdx.x;
int gid = globalOffset + tid;
if (gid >= size) { return; }
Agent agent = agents[gid];
maxXAndIndexes[gid] = {agentMaxX(agent, seconds), gid};
}
__global__
void mapAgentsToSortedIndex(Agent* in, Agent* out, MaxXAndIndex* maxXAndIndexes, int size) {
int tid = threadIdx.x;
int globalOffset = blockDim.x * blockIdx.x;
int gid = globalOffset + tid;
if (gid >= size) { return; }
out[maxXAndIndexes[gid].index] = in[gid];
}
__device__
float3 quadraticFromFloat2(float2 a) {
return {a.x * a.x, 2 * a.x * a.y, a.y * a.y};
}
__device__
float3 float3Add(float3 a, float3 b) {
return {a.x + b.x, a.y + b.y, a.z + b.z};
}
__device__
float3 float3Scale(float3 a, float b) {
return {a.x * b, a.y * b, a.z * b};
}
__device__
float3 calculateQuadraticEquationOfCollision(Agent& one, Agent& two) {
float radiusSum = one.radius + two.radius;
float radiusSumSquared = radiusSum * radiusSum;
float3 dxt = quadraticFromFloat2({two.velocity.x - one.velocity.x, two.position.x - one.position.x});
float3 dyt = quadraticFromFloat2({two.velocity.y - one.velocity.y, two.position.y - one.position.y});
float3 dzt = quadraticFromFloat2({two.velocity.z - one.velocity.z, two.position.z - one.position.z});
float3 dt = float3Add(float3Add(dxt, dyt), dzt);
dt.z -= radiusSumSquared;
return dt;
}
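// The quadratic above encodes |dp + dv*t|^2 == (r1 + r2)^2 for the agents'
// relative position dp and relative velocity dv: quadraticFromFloat2 expands
// each axis into (t^2, t, 1) coefficients, the three axes are summed, and the
// squared radius sum is folded into the constant term.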
typedef struct {
int exists;
float sol1;
float sol2;
} QuadraticSolution;
__device__
QuadraticSolution solveQuadraticEquation(float3 q) {
if (q.x == 0) {
return {0, 0, 0};
}
float b2Minus4ac = (q.y * q.y) - (4 * q.x * q.z);
if (b2Minus4ac < 0) {
return {0, 0, 0};
}
float twoA = 2 * q.x;
float sqrtB2Minus4Ac = sqrt(b2Minus4ac);
float sol1 = (-q.y + sqrtB2Minus4Ac) / twoA;
float sol2 = (-q.y - sqrtB2Minus4Ac) / twoA;
return {1, sol1, sol2};
}
__device__
float calculateTimeOfCollision(Agent& one, Agent& two) {
float3 q = calculateQuadraticEquationOfCollision(one, two);
QuadraticSolution qSol = solveQuadraticEquation(q);
if (qSol.exists) {
if (qSol.sol1 < 0) {
return qSol.sol2;
} else {
if (qSol.sol2 < 0) {
return qSol.sol1;
} else {
return min(qSol.sol1, qSol.sol2);
}
}
} else {
return -1;
}
}
__device__
float distanceSquaredBetween(float3 a, float3 b) {
float xd = a.x - b.x;
float yd = a.y - b.y;
float zd = a.z - b.z;
return (xd * xd) + (yd * yd) + (zd * zd);
}
__global__
void detectCollisions(Agent* agents, int size, float seconds, CollisionAndTime* collisions, int* collisionFlags) {
int tid = threadIdx.x;
int globalOffset = blockDim.x * blockIdx.x;
int gid = globalOffset + tid;
if (gid >= size) { return; }
Agent agent = agents[gid];
float myMinX = agentMinX(agent, seconds);
int collisionFlag = 0;
CollisionAndTime collision = {-1, -1, seconds};
for (int i = gid - 1; i >= 0; --i) {
Agent other = agents[i];
float otherMaxX = agentMaxX(other, seconds);
if (otherMaxX < myMinX) {
break;
}
float radiusSum = agent.radius + other.radius;
float radiusSumSquared = radiusSum * radiusSum;
// only collide if they are not already intersecting
if (distanceSquaredBetween(agent.position, other.position) > radiusSumSquared) {
float secondsOfCollision = calculateTimeOfCollision(agent, other);
if (secondsOfCollision >= 0 && secondsOfCollision < seconds) {
if (secondsOfCollision < collision.seconds) {
collisionFlag = 1;
collision = {gid, i, secondsOfCollision};
}
}
}
}
collisions[gid] = collision;
collisionFlags[gid] = collisionFlag;
}
__global__
void advanceTime(Agent* agents, int size, float seconds) {
int tid = threadIdx.x;
int globalOffset = blockDim.x * blockIdx.x;
int gid = globalOffset + tid;
if (gid >= size) { return; }
Agent agent = agents[gid];
agents[gid].position = float3Add(agent.position, float3Scale(agent.velocity, seconds));
}
} // namespace anonymous
ContinuousCollision::ContinuousCollision(int maxAgents) {
hipMalloc(&m_maxXAndIndexes, maxAgents * sizeof(MaxXAndIndex));
hipMalloc(&m_needsSortingFlag, sizeof(int));
hipMalloc(&m_agentsBuffer, maxAgents * sizeof(Agent));
hipMalloc(&m_collisions, maxAgents * sizeof(CollisionAndTime));
hipMalloc(&m_collisionsBuffer, maxAgents * sizeof(CollisionAndTime));
hipMalloc(&m_collisionFlags, maxAgents * sizeof(int));
hipMalloc(&m_collisionFlagsOffsets, maxAgents * sizeof(int));
}
ContinuousCollision::~ContinuousCollision() {
hipFree(m_maxXAndIndexes);
hipFree(m_needsSortingFlag);
hipFree(m_agentsBuffer);
hipFree(m_collisions);
hipFree(m_collisionsBuffer);
hipFree(m_collisionFlags);
hipFree(m_collisionFlagsOffsets);
}
__device__
int compareMaxXAndIndex(MaxXAndIndex a, MaxXAndIndex b) {
return a.maxX > b.maxX;
}
__device__
int addx(int a, int b) {
return a + b;
}
void ContinuousCollision::collide(Agent* agents, int size, float seconds) {
constexpr int kThreadsPerBlock = 256;
int numBlocks = ceil(size / (float) kThreadsPerBlock);
int hadCollisions = 0;
do {
hipLaunchKernelGGL(( mapAgentsToMaxXAndIndex), dim3(numBlocks), dim3(kThreadsPerBlock), 0, 0, agents, m_maxXAndIndexes, size, seconds);
InsertionSort::sort<MaxXAndIndex, compareMaxXAndIndex>(m_maxXAndIndexes, m_needsSortingFlag, size);
hipLaunchKernelGGL(( mapAgentsToSortedIndex), dim3(numBlocks), dim3(kThreadsPerBlock), 0, 0, agents, m_agentsBuffer, m_maxXAndIndexes, size);
hipLaunchKernelGGL(( detectCollisions), dim3(numBlocks), dim3(kThreadsPerBlock), 0, 0, m_agentsBuffer, size, seconds, m_collisions, m_collisionFlags);
CollisionAndTime earliestCollision = Reduce::reduce<CollisionAndTime, minCollisionAndTime>(m_collisions, m_collisionsBuffer, size);
if (earliestCollision.one != -1) {
hipLaunchKernelGGL(( advanceTime), dim3(numBlocks), dim3(kThreadsPerBlock), 0, 0, m_agentsBuffer, size, earliestCollision.seconds);
// TODO Resolve collision
seconds -= earliestCollision.seconds;
hadCollisions = 1;
} else {
hipLaunchKernelGGL(( advanceTime), dim3(numBlocks), dim3(kThreadsPerBlock), 0, 0, m_agentsBuffer, size, seconds);
hadCollisions = 0;
}
} while (hadCollisions);
}
|
27cd54ba71d88a96e02c702da57c58c6cc1fd9df.cu
|
#include <Cuda/ContinuousCollision.cuh>
#include <Cuda/InsertionSort.cuh>
#include <Cuda/Scan.cuh>
#include <Cuda/Reduce.cuh>
namespace {
__device__
CollisionAndTime minCollisionAndTime(CollisionAndTime a, CollisionAndTime b) {
if (a.seconds < b.seconds) {
return a;
} else {
return b;
}
}
__device__
float agentMaxX(Agent& agent, float seconds) {
float maxX = agent.position.x;
if (agent.velocity.x > 0) {
maxX += agent.velocity.x * seconds;
}
return maxX + agent.radius;
}
__device__
float agentMinX(Agent& agent, float seconds) {
float minX = agent.position.x;
if (agent.velocity.x < 0) {
minX += agent.velocity.x * seconds;
}
return minX - agent.radius;
}
__global__
void mapAgentsToMaxXAndIndex(Agent* agents, MaxXAndIndex* maxXAndIndexes, int size, float seconds) {
int tid = threadIdx.x;
int globalOffset = blockDim.x * blockIdx.x;
int gid = globalOffset + tid;
if (gid >= size) { return; }
Agent agent = agents[gid];
maxXAndIndexes[gid] = {agentMaxX(agent, seconds), gid};
}
__global__
void mapAgentsToSortedIndex(Agent* in, Agent* out, MaxXAndIndex* maxXAndIndexes, int size) {
int tid = threadIdx.x;
int globalOffset = blockDim.x * blockIdx.x;
int gid = globalOffset + tid;
if (gid >= size) { return; }
out[maxXAndIndexes[gid].index] = in[gid];
}
__device__
float3 quadraticFromFloat2(float2 a) {
return {a.x * a.x, 2 * a.x * a.y, a.y * a.y};
}
__device__
float3 float3Add(float3 a, float3 b) {
return {a.x + b.x, a.y + b.y, a.z + b.z};
}
__device__
float3 float3Scale(float3 a, float b) {
return {a.x * b, a.y * b, a.z * b};
}
__device__
float3 calculateQuadraticEquationOfCollision(Agent& one, Agent& two) {
float radiusSum = one.radius + two.radius;
float radiusSumSquared = radiusSum * radiusSum;
float3 dxt = quadraticFromFloat2({two.velocity.x - one.velocity.x, two.position.x - one.position.x});
float3 dyt = quadraticFromFloat2({two.velocity.y - one.velocity.y, two.position.y - one.position.y});
float3 dzt = quadraticFromFloat2({two.velocity.z - one.velocity.z, two.position.z - one.position.z});
float3 dt = float3Add(float3Add(dxt, dyt), dzt);
dt.z -= radiusSumSquared;
return dt;
}
typedef struct {
int exists;
float sol1;
float sol2;
} QuadraticSolution;
__device__
QuadraticSolution solveQuadraticEquation(float3 q) {
if (q.x == 0) {
return {0, 0, 0};
}
float b2Minus4ac = (q.y * q.y) - (4 * q.x * q.z);
if (b2Minus4ac < 0) {
return {0, 0, 0};
}
float twoA = 2 * q.x;
float sqrtB2Minus4Ac = sqrt(b2Minus4ac);
float sol1 = (-q.y + sqrtB2Minus4Ac) / twoA;
float sol2 = (-q.y - sqrtB2Minus4Ac) / twoA;
return {1, sol1, sol2};
}
__device__
float calculateTimeOfCollision(Agent& one, Agent& two) {
float3 q = calculateQuadraticEquationOfCollision(one, two);
QuadraticSolution qSol = solveQuadraticEquation(q);
if (qSol.exists) {
if (qSol.sol1 < 0) {
return qSol.sol2;
} else {
if (qSol.sol2 < 0) {
return qSol.sol1;
} else {
return min(qSol.sol1, qSol.sol2);
}
}
} else {
return -1;
}
}
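// Root selection above: the smaller non-negative root is preferred when both
// exist; a value that is still negative (contact in the past) or the -1
// "no real roots" sentinel is later filtered out by detectCollisions through
// its secondsOfCollision >= 0 check.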
__device__
float distanceSquaredBetween(float3 a, float3 b) {
float xd = a.x - b.x;
float yd = a.y - b.y;
float zd = a.z - b.z;
return (xd * xd) + (yd * yd) + (zd * zd);
}
__global__
void detectCollisions(Agent* agents, int size, float seconds, CollisionAndTime* collisions, int* collisionFlags) {
int tid = threadIdx.x;
int globalOffset = blockDim.x * blockIdx.x;
int gid = globalOffset + tid;
if (gid >= size) { return; }
Agent agent = agents[gid];
float myMinX = agentMinX(agent, seconds);
int collisionFlag = 0;
CollisionAndTime collision = {-1, -1, seconds};
for (int i = gid - 1; i >= 0; --i) {
Agent other = agents[i];
float otherMaxX = agentMaxX(other, seconds);
if (otherMaxX < myMinX) {
break;
}
float radiusSum = agent.radius + other.radius;
float radiusSumSquared = radiusSum * radiusSum;
// only collide if they are not already intersecting
if (distanceSquaredBetween(agent.position, other.position) > radiusSumSquared) {
float secondsOfCollision = calculateTimeOfCollision(agent, other);
if (secondsOfCollision >= 0 && secondsOfCollision < seconds) {
if (secondsOfCollision < collision.seconds) {
collisionFlag = 1;
collision = {gid, i, secondsOfCollision};
}
}
}
}
collisions[gid] = collision;
collisionFlags[gid] = collisionFlag;
}
__global__
void advanceTime(Agent* agents, int size, float seconds) {
int tid = threadIdx.x;
int globalOffset = blockDim.x * blockIdx.x;
int gid = globalOffset + tid;
if (gid >= size) { return; }
Agent agent = agents[gid];
agents[gid].position = float3Add(agent.position, float3Scale(agent.velocity, seconds));
}
} // namespace anonymous
ContinuousCollision::ContinuousCollision(int maxAgents) {
cudaMalloc(&m_maxXAndIndexes, maxAgents * sizeof(MaxXAndIndex));
cudaMalloc(&m_needsSortingFlag, sizeof(int));
cudaMalloc(&m_agentsBuffer, maxAgents * sizeof(Agent));
cudaMalloc(&m_collisions, maxAgents * sizeof(CollisionAndTime));
cudaMalloc(&m_collisionsBuffer, maxAgents * sizeof(CollisionAndTime));
cudaMalloc(&m_collisionFlags, maxAgents * sizeof(int));
cudaMalloc(&m_collisionFlagsOffsets, maxAgents * sizeof(int));
}
ContinuousCollision::~ContinuousCollision() {
cudaFree(m_maxXAndIndexes);
cudaFree(m_needsSortingFlag);
cudaFree(m_agentsBuffer);
cudaFree(m_collisions);
cudaFree(m_collisionsBuffer);
cudaFree(m_collisionFlags);
cudaFree(m_collisionFlagsOffsets);
}
__device__
int compareMaxXAndIndex(MaxXAndIndex a, MaxXAndIndex b) {
return a.maxX > b.maxX;
}
__device__
int addx(int a, int b) {
return a + b;
}
void ContinuousCollision::collide(Agent* agents, int size, float seconds) {
constexpr int kThreadsPerBlock = 256;
int numBlocks = ceil(size / (float) kThreadsPerBlock);
int hadCollisions = 0;
do {
mapAgentsToMaxXAndIndex<<<numBlocks, kThreadsPerBlock>>>(agents, m_maxXAndIndexes, size, seconds);
InsertionSort::sort<MaxXAndIndex, compareMaxXAndIndex>(m_maxXAndIndexes, m_needsSortingFlag, size);
mapAgentsToSortedIndex<<<numBlocks, kThreadsPerBlock>>>(agents, m_agentsBuffer, m_maxXAndIndexes, size);
detectCollisions<<<numBlocks, kThreadsPerBlock>>>(m_agentsBuffer, size, seconds, m_collisions, m_collisionFlags);
CollisionAndTime earliestCollision = Reduce::reduce<CollisionAndTime, minCollisionAndTime>(m_collisions, m_collisionsBuffer, size);
if (earliestCollision.one != -1) {
advanceTime<<<numBlocks, kThreadsPerBlock>>>(m_agentsBuffer, size, earliestCollision.seconds);
// TODO Resolve collision
seconds -= earliestCollision.seconds;
hadCollisions = 1;
} else {
advanceTime<<<numBlocks, kThreadsPerBlock>>>(m_agentsBuffer, size, seconds);
hadCollisions = 0;
}
} while (hadCollisions);
}
|
bf6e2d43798ed0aa6e7d4ec26a011b0615a63892.hip
|
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*############################################################################*/
// includes, system
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
// includes, project
#include "lbm.h"
#include "main.h"
#ifndef __MCUDA__
#include <hip/hip_runtime.h>
#else
#include <mcuda.h>
#endif
#define ITERS 2
#define DFL1 (1.0f/ 3.0f)
#define DFL2 (1.0f/18.0f)
#define DFL3 (1.0f/36.0f)
// includes, kernels
#include "lbm_kernel.cu"
#define REAL_MARGIN (CALC_INDEX(0, 0, 2, 0) - CALC_INDEX(0,0,0,0))
#define TOTAL_MARGIN (2*PADDED_X*PADDED_Y*N_CELL_ENTRIES)
/******************************************************************************/
void CUDA_LBM_performStreamCollide( LBM_Grid srcGrid, LBM_Grid dstGrid ) {
dim3 dimBlock, dimGrid;
dimBlock.x = SIZE_X;
dimGrid.x = SIZE_Y;
dimGrid.y = SIZE_Z;
dimBlock.y = dimBlock.z = dimGrid.z = 1;
hipLaunchKernelGGL(( performStreamCollide_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, srcGrid, dstGrid);
CUDA_ERRCK;
}
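// Launch geometry above: one thread per lattice cell along x (dimBlock.x =
// SIZE_X) and one block per (y, z) line (dimGrid = SIZE_Y x SIZE_Z), so a
// single launch sweeps the whole grid before the CUDA_ERRCK macro (defined in
// the benchmark headers) performs its error check.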
/*############################################################################*/
void LBM_allocateGrid( float** ptr ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
*ptr = (float*)malloc( size );
if( ! *ptr ) {
printf( "LBM_allocateGrid: could not allocate %.1f MByte\n",
size / (1024.0*1024.0) );
exit( 1 );
}
memset( *ptr, 0, size );
printf( "LBM_allocateGrid: allocated %.1f MByte\n",
size / (1024.0*1024.0) );
*ptr += REAL_MARGIN;
}
/******************************************************************************/
void CUDA_LBM_allocateGrid( float** ptr ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
hipMalloc((void**)ptr, size);
CUDA_ERRCK;
*ptr += REAL_MARGIN;
}
/*############################################################################*/
void LBM_freeGrid( float** ptr ) {
free( *ptr-REAL_MARGIN );
*ptr = NULL;
}
/******************************************************************************/
void CUDA_LBM_freeGrid( float** ptr ) {
hipFree( *ptr-REAL_MARGIN );
*ptr = NULL;
}
/*############################################################################*/
void LBM_initializeGrid( LBM_Grid grid ) {
SWEEP_VAR
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
SRC_C( grid ) = DFL1;
SRC_N( grid ) = DFL2;
SRC_S( grid ) = DFL2;
SRC_E( grid ) = DFL2;
SRC_W( grid ) = DFL2;
SRC_T( grid ) = DFL2;
SRC_B( grid ) = DFL2;
SRC_NE( grid ) = DFL3;
SRC_NW( grid ) = DFL3;
SRC_SE( grid ) = DFL3;
SRC_SW( grid ) = DFL3;
SRC_NT( grid ) = DFL3;
SRC_NB( grid ) = DFL3;
SRC_ST( grid ) = DFL3;
SRC_SB( grid ) = DFL3;
SRC_ET( grid ) = DFL3;
SRC_EB( grid ) = DFL3;
SRC_WT( grid ) = DFL3;
SRC_WB( grid ) = DFL3;
CLEAR_ALL_FLAGS_SWEEP( grid );
SWEEP_END
}
/******************************************************************************/
void CUDA_LBM_initializeGrid( float** d_grid, float** h_grid ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
hipMemcpy(*d_grid - REAL_MARGIN, *h_grid - REAL_MARGIN, size, hipMemcpyHostToDevice);
CUDA_ERRCK;
}
void CUDA_LBM_getDeviceGrid( float** d_grid, float** h_grid ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
hipDeviceSynchronize();
CUDA_ERRCK;
hipMemcpy(*h_grid - REAL_MARGIN, *d_grid - REAL_MARGIN, size, hipMemcpyDeviceToHost);
CUDA_ERRCK;
}
/*############################################################################*/
void LBM_swapGrids( LBM_GridPtr grid1, LBM_GridPtr grid2 ) {
LBM_Grid aux = *grid1;
*grid1 = *grid2;
*grid2 = aux;
}
/*############################################################################*/
void LBM_loadObstacleFile( LBM_Grid grid, const char* filename ) {
int x, y, z;
FILE* file = fopen( filename, "rb" );
for( z = 0; z < SIZE_Z; z++ ) {
for( y = 0; y < SIZE_Y; y++ ) {
for( x = 0; x < SIZE_X; x++ ) {
if( fgetc( file ) != '.' ) SET_FLAG( grid, x, y, z, OBSTACLE );
}
fgetc( file );
}
fgetc( file );
}
fclose( file );
}
/*############################################################################*/
void LBM_initializeSpecialCellsForLDC( LBM_Grid grid ) {
int x, y, z;
for( z = -2; z < SIZE_Z+2; z++ ) {
for( y = 0; y < SIZE_Y; y++ ) {
for( x = 0; x < SIZE_X; x++ ) {
if( x == 0 || x == SIZE_X-1 ||
y == 0 || y == SIZE_Y-1 ||
z == 0 || z == SIZE_Z-1 ) {
SET_FLAG( grid, x, y, z, OBSTACLE );
}
else {
if( (z == 1 || z == SIZE_Z-2) &&
x > 1 && x < SIZE_X-2 &&
y > 1 && y < SIZE_Y-2 ) {
SET_FLAG( grid, x, y, z, ACCEL );
}
}
}
}
}
}
/*############################################################################*/
void LBM_showGridStatistics( LBM_Grid grid ) {
int nObstacleCells = 0,
nAccelCells = 0,
nFluidCells = 0;
float ux, uy, uz;
float minU2 = 1e+30, maxU2 = -1e+30, u2;
float minRho = 1e+30, maxRho = -1e+30, rho;
float mass = 0;
SWEEP_VAR
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
rho = LOCAL( grid, C ) + LOCAL( grid, N )
+ LOCAL( grid, S ) + LOCAL( grid, E )
+ LOCAL( grid, W ) + LOCAL( grid, T )
+ LOCAL( grid, B ) + LOCAL( grid, NE )
+ LOCAL( grid, NW ) + LOCAL( grid, SE )
+ LOCAL( grid, SW ) + LOCAL( grid, NT )
+ LOCAL( grid, NB ) + LOCAL( grid, ST )
+ LOCAL( grid, SB ) + LOCAL( grid, ET )
+ LOCAL( grid, EB ) + LOCAL( grid, WT )
+ LOCAL( grid, WB );
if( rho < minRho ) minRho = rho;
if( rho > maxRho ) maxRho = rho;
mass += rho;
if( TEST_FLAG_SWEEP( grid, OBSTACLE )) {
nObstacleCells++;
}
else {
if( TEST_FLAG_SWEEP( grid, ACCEL ))
nAccelCells++;
else
nFluidCells++;
ux = + LOCAL( grid, E ) - LOCAL( grid, W )
+ LOCAL( grid, NE ) - LOCAL( grid, NW )
+ LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, ET ) + LOCAL( grid, EB )
- LOCAL( grid, WT ) - LOCAL( grid, WB );
uy = + LOCAL( grid, N ) - LOCAL( grid, S )
+ LOCAL( grid, NE ) + LOCAL( grid, NW )
- LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, NT ) + LOCAL( grid, NB )
- LOCAL( grid, ST ) - LOCAL( grid, SB );
uz = + LOCAL( grid, T ) - LOCAL( grid, B )
+ LOCAL( grid, NT ) - LOCAL( grid, NB )
+ LOCAL( grid, ST ) - LOCAL( grid, SB )
+ LOCAL( grid, ET ) - LOCAL( grid, EB )
+ LOCAL( grid, WT ) - LOCAL( grid, WB );
u2 = (ux*ux + uy*uy + uz*uz) / (rho*rho);
if( u2 < minU2 ) minU2 = u2;
if( u2 > maxU2 ) maxU2 = u2;
}
SWEEP_END
printf( "LBM_showGridStatistics:\n"
"\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\n"
"\tminRho: %8.4f maxRho: %8.4f mass: %e\n"
"\tminU: %e maxU: %e\n\n",
nObstacleCells, nAccelCells, nFluidCells,
minRho, maxRho, mass,
sqrt( minU2 ), sqrt( maxU2 ) );
}
/*############################################################################*/
static void storeValue( FILE* file, OUTPUT_PRECISION* v ) {
const int litteBigEndianTest = 1;
if( (*((unsigned char*) &litteBigEndianTest)) == 0 ) { /* big endian */
const char* vPtr = (char*) v;
char buffer[sizeof( OUTPUT_PRECISION )];
unsigned i;
for (i = 0; i < sizeof( OUTPUT_PRECISION ); i++)
buffer[i] = vPtr[sizeof( OUTPUT_PRECISION ) - i - 1];
fwrite( buffer, sizeof( OUTPUT_PRECISION ), 1, file );
}
else { /* little endian */
fwrite( v, sizeof( OUTPUT_PRECISION ), 1, file );
}
}
/*############################################################################*/
void LBM_storeVelocityField( LBM_Grid grid, const char* filename,
const int binary ) {
OUTPUT_PRECISION rho, ux, uy, uz;
FILE* file = fopen( filename, (binary ? "wb" : "w") );
SWEEP_VAR
SWEEP_START(0,0,0,SIZE_X,SIZE_Y,SIZE_Z)
rho = + SRC_C( grid ) + SRC_N( grid )
+ SRC_S( grid ) + SRC_E( grid )
+ SRC_W( grid ) + SRC_T( grid )
+ SRC_B( grid ) + SRC_NE( grid )
+ SRC_NW( grid ) + SRC_SE( grid )
+ SRC_SW( grid ) + SRC_NT( grid )
+ SRC_NB( grid ) + SRC_ST( grid )
+ SRC_SB( grid ) + SRC_ET( grid )
+ SRC_EB( grid ) + SRC_WT( grid )
+ SRC_WB( grid );
ux = + SRC_E( grid ) - SRC_W( grid )
+ SRC_NE( grid ) - SRC_NW( grid )
+ SRC_SE( grid ) - SRC_SW( grid )
+ SRC_ET( grid ) + SRC_EB( grid )
- SRC_WT( grid ) - SRC_WB( grid );
uy = + SRC_N( grid ) - SRC_S( grid )
+ SRC_NE( grid ) + SRC_NW( grid )
- SRC_SE( grid ) - SRC_SW( grid )
+ SRC_NT( grid ) + SRC_NB( grid )
- SRC_ST( grid ) - SRC_SB( grid );
uz = + SRC_T( grid ) - SRC_B( grid )
+ SRC_NT( grid ) - SRC_NB( grid )
+ SRC_ST( grid ) - SRC_SB( grid )
+ SRC_ET( grid ) - SRC_EB( grid )
+ SRC_WT( grid ) - SRC_WB( grid );
ux /= rho;
uy /= rho;
uz /= rho;
if( binary ) {
/*
fwrite( &ux, sizeof( ux ), 1, file );
fwrite( &uy, sizeof( uy ), 1, file );
fwrite( &uz, sizeof( uz ), 1, file );
*/
storeValue( file, &ux );
storeValue( file, &uy );
storeValue( file, &uz );
} else
fprintf( file, "%e %e %e\n", ux, uy, uz );
SWEEP_END;
fclose( file );
}
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*############################################################################*/
#include "main.h"
/*############################################################################*/
static LBM_Grid CUDA_srcGrid, CUDA_dstGrid;
/*############################################################################*/
//struct pb_TimerSet timers;
int main( int nArgs, char* arg[] ) {
MAIN_Param param;
int t;
//pb_InitializeTimerSet(&timers);
//struct pb_Parameters* params;
//params = pb_ReadParameters(&nArgs, arg);
/* Initialize the parameters structure */
struct pb_Parameters *params = (struct pb_Parameters *)malloc(sizeof(struct pb_Parameters));
params->outFile = NULL;
params->inpFiles = (char **)malloc(sizeof(char *));
params->inpFiles[0] = NULL;
// Read input from command line
#ifdef SIZE0
params->inpFiles[0] = "~/software/parboil-2.5/datasets/lbm/short/input/120_120_150_ldc.of";
#endif
#ifdef SIZE1
params->inpFiles[0] = "~/software/parboil-2.5/datasets/lbm/long/input/120_120_150_ldc.of";
#endif
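// Note: stat()/fopen() do not perform shell-style "~" expansion, so the
// hard-coded dataset paths above will generally not resolve unless they are
// replaced with absolute paths.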
static LBM_GridPtr TEMP_srcGrid;
//Setup TEMP datastructures
LBM_allocateGrid( (float**) &TEMP_srcGrid );
MAIN_parseCommandLine( nArgs, arg, &param, params );
MAIN_printInfo( &param );
MAIN_initialize( &param );
for( t = 1; t <= param.nTimeSteps; t++ ) {
//pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
CUDA_LBM_performStreamCollide( CUDA_srcGrid, CUDA_dstGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
LBM_swapGrids( &CUDA_srcGrid, &CUDA_dstGrid );
//if( (t & 63) == 0 ) {
printf( "timestep: %i\n", t );
}
MAIN_finalize( &param );
LBM_freeGrid( (float**) &TEMP_srcGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_NONE);
//pb_PrintTimerSet(&timers);
// pb_FreeParameters(params);
return 0;
}
/*############################################################################*/
void MAIN_parseCommandLine( int nArgs, char* arg[], MAIN_Param* param, struct pb_Parameters * params ) {
struct stat fileStat;
/*
if( nArgs < 2 ) {
printf( "syntax: lbm <time steps>\n" );
exit( 1 );
}
*/
param->nTimeSteps = ITERS;//atoi( arg[1] );
if( params->inpFiles[0] != NULL ) {
param->obstacleFilename = params->inpFiles[0];
if( stat( param->obstacleFilename, &fileStat ) != 0 ) {
printf( "MAIN_parseCommandLine: cannot stat obstacle file '%s'\n",
param->obstacleFilename );
exit( 1 );
}
if( fileStat.st_size != SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z ) {
printf( "MAIN_parseCommandLine:\n"
"\tsize of file '%s' is %i bytes\n"
"\texpected size is %i bytes\n",
param->obstacleFilename, (int) fileStat.st_size,
SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z );
exit( 1 );
}
}
else param->obstacleFilename = NULL;
param->resultFilename = params->outFile;
}
/*############################################################################*/
void MAIN_printInfo( const MAIN_Param* param ) {
printf( "MAIN_printInfo:\n"
"\tgrid size : %i x %i x %i = %.2f * 10^6 Cells\n"
"\tnTimeSteps : %i\n"
"\tresult file : %s\n"
"\taction : %s\n"
"\tsimulation type: %s\n"
"\tobstacle file : %s\n\n",
SIZE_X, SIZE_Y, SIZE_Z, 1e-6*SIZE_X*SIZE_Y*SIZE_Z,
param->nTimeSteps, param->resultFilename,
"store", "lid-driven cavity",
(param->obstacleFilename == NULL) ? "<none>" :
param->obstacleFilename );
}
/*############################################################################*/
void MAIN_initialize( const MAIN_Param* param ) {
static LBM_Grid TEMP_srcGrid, TEMP_dstGrid;
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//Setup TEMP datastructures
LBM_allocateGrid( (float**) &TEMP_srcGrid );
LBM_allocateGrid( (float**) &TEMP_dstGrid );
LBM_initializeGrid( TEMP_srcGrid );
LBM_initializeGrid( TEMP_dstGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_IO);
if( param->obstacleFilename != NULL ) {
LBM_loadObstacleFile( TEMP_srcGrid, param->obstacleFilename );
LBM_loadObstacleFile( TEMP_dstGrid, param->obstacleFilename );
}
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
LBM_initializeSpecialCellsForLDC( TEMP_srcGrid );
LBM_initializeSpecialCellsForLDC( TEMP_dstGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
//Setup DEVICE datastructures
CUDA_LBM_allocateGrid( (float**) &CUDA_srcGrid );
CUDA_LBM_allocateGrid( (float**) &CUDA_dstGrid );
//Initialize DEVICE datastructures
CUDA_LBM_initializeGrid( (float**)&CUDA_srcGrid, (float**)&TEMP_srcGrid );
CUDA_LBM_initializeGrid( (float**)&CUDA_dstGrid, (float**)&TEMP_dstGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
LBM_showGridStatistics( TEMP_srcGrid );
LBM_freeGrid( (float**) &TEMP_srcGrid );
LBM_freeGrid( (float**) &TEMP_dstGrid );
}
/*############################################################################*/
void MAIN_finalize( const MAIN_Param* param ) {
LBM_Grid TEMP_srcGrid;
//Setup TEMP datastructures
LBM_allocateGrid( (float**) &TEMP_srcGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
CUDA_LBM_getDeviceGrid((float**)&CUDA_srcGrid, (float**)&TEMP_srcGrid);
// pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
LBM_showGridStatistics( TEMP_srcGrid );
//LBM_storeVelocityField( TEMP_srcGrid, param->resultFilename, TRUE );
LBM_freeGrid( (float**) &TEMP_srcGrid );
CUDA_LBM_freeGrid( (float**) &CUDA_srcGrid );
CUDA_LBM_freeGrid( (float**) &CUDA_dstGrid );
}
|
bf6e2d43798ed0aa6e7d4ec26a011b0615a63892.cu
|
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*############################################################################*/
// includes, system
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
// includes, project
#include "lbm.h"
#include "main.h"
#ifndef __MCUDA__
#include <cuda.h>
#else
#include <mcuda.h>
#endif
#define ITERS 2
#define DFL1 (1.0f/ 3.0f)
#define DFL2 (1.0f/18.0f)
#define DFL3 (1.0f/36.0f)
// includes, kernels
#include "lbm_kernel.cu"
#define REAL_MARGIN (CALC_INDEX(0, 0, 2, 0) - CALC_INDEX(0,0,0,0))
#define TOTAL_MARGIN (2*PADDED_X*PADDED_Y*N_CELL_ENTRIES)
/******************************************************************************/
void CUDA_LBM_performStreamCollide( LBM_Grid srcGrid, LBM_Grid dstGrid ) {
dim3 dimBlock, dimGrid;
dimBlock.x = SIZE_X;
dimGrid.x = SIZE_Y;
dimGrid.y = SIZE_Z;
dimBlock.y = dimBlock.z = dimGrid.z = 1;
performStreamCollide_kernel<<<dimGrid, dimBlock>>>(srcGrid, dstGrid);
CUDA_ERRCK;
}
/*############################################################################*/
void LBM_allocateGrid( float** ptr ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
*ptr = (float*)malloc( size );
if( ! *ptr ) {
printf( "LBM_allocateGrid: could not allocate %.1f MByte\n",
size / (1024.0*1024.0) );
exit( 1 );
}
memset( *ptr, 0, size );
printf( "LBM_allocateGrid: allocated %.1f MByte\n",
size / (1024.0*1024.0) );
*ptr += REAL_MARGIN;
}
/******************************************************************************/
void CUDA_LBM_allocateGrid( float** ptr ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
cudaMalloc((void**)ptr, size);
CUDA_ERRCK;
*ptr += REAL_MARGIN;
}
/*############################################################################*/
void LBM_freeGrid( float** ptr ) {
free( *ptr-REAL_MARGIN );
*ptr = NULL;
}
/******************************************************************************/
void CUDA_LBM_freeGrid( float** ptr ) {
cudaFree( *ptr-REAL_MARGIN );
*ptr = NULL;
}
/*############################################################################*/
void LBM_initializeGrid( LBM_Grid grid ) {
SWEEP_VAR
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
SRC_C( grid ) = DFL1;
SRC_N( grid ) = DFL2;
SRC_S( grid ) = DFL2;
SRC_E( grid ) = DFL2;
SRC_W( grid ) = DFL2;
SRC_T( grid ) = DFL2;
SRC_B( grid ) = DFL2;
SRC_NE( grid ) = DFL3;
SRC_NW( grid ) = DFL3;
SRC_SE( grid ) = DFL3;
SRC_SW( grid ) = DFL3;
SRC_NT( grid ) = DFL3;
SRC_NB( grid ) = DFL3;
SRC_ST( grid ) = DFL3;
SRC_SB( grid ) = DFL3;
SRC_ET( grid ) = DFL3;
SRC_EB( grid ) = DFL3;
SRC_WT( grid ) = DFL3;
SRC_WB( grid ) = DFL3;
CLEAR_ALL_FLAGS_SWEEP( grid );
SWEEP_END
}
/******************************************************************************/
void CUDA_LBM_initializeGrid( float** d_grid, float** h_grid ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
cudaMemcpy(*d_grid - REAL_MARGIN, *h_grid - REAL_MARGIN, size, cudaMemcpyHostToDevice);
CUDA_ERRCK;
}
void CUDA_LBM_getDeviceGrid( float** d_grid, float** h_grid ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
cudaThreadSynchronize();
CUDA_ERRCK;
cudaMemcpy(*h_grid - REAL_MARGIN, *d_grid - REAL_MARGIN, size, cudaMemcpyDeviceToHost);
CUDA_ERRCK;
}
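// cudaThreadSynchronize() above is the legacy API name kept by the original
// benchmark; current CUDA releases deprecate it in favour of
// cudaDeviceSynchronize(), which is what the hipified copy maps to
// (hipDeviceSynchronize).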
/*############################################################################*/
void LBM_swapGrids( LBM_GridPtr grid1, LBM_GridPtr grid2 ) {
LBM_Grid aux = *grid1;
*grid1 = *grid2;
*grid2 = aux;
}
/*############################################################################*/
void LBM_loadObstacleFile( LBM_Grid grid, const char* filename ) {
int x, y, z;
FILE* file = fopen( filename, "rb" );
for( z = 0; z < SIZE_Z; z++ ) {
for( y = 0; y < SIZE_Y; y++ ) {
for( x = 0; x < SIZE_X; x++ ) {
if( fgetc( file ) != '.' ) SET_FLAG( grid, x, y, z, OBSTACLE );
}
fgetc( file );
}
fgetc( file );
}
fclose( file );
}
/*############################################################################*/
void LBM_initializeSpecialCellsForLDC( LBM_Grid grid ) {
int x, y, z;
for( z = -2; z < SIZE_Z+2; z++ ) {
for( y = 0; y < SIZE_Y; y++ ) {
for( x = 0; x < SIZE_X; x++ ) {
if( x == 0 || x == SIZE_X-1 ||
y == 0 || y == SIZE_Y-1 ||
z == 0 || z == SIZE_Z-1 ) {
SET_FLAG( grid, x, y, z, OBSTACLE );
}
else {
if( (z == 1 || z == SIZE_Z-2) &&
x > 1 && x < SIZE_X-2 &&
y > 1 && y < SIZE_Y-2 ) {
SET_FLAG( grid, x, y, z, ACCEL );
}
}
}
}
}
}
/*############################################################################*/
void LBM_showGridStatistics( LBM_Grid grid ) {
int nObstacleCells = 0,
nAccelCells = 0,
nFluidCells = 0;
float ux, uy, uz;
float minU2 = 1e+30, maxU2 = -1e+30, u2;
float minRho = 1e+30, maxRho = -1e+30, rho;
float mass = 0;
SWEEP_VAR
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
rho = LOCAL( grid, C ) + LOCAL( grid, N )
+ LOCAL( grid, S ) + LOCAL( grid, E )
+ LOCAL( grid, W ) + LOCAL( grid, T )
+ LOCAL( grid, B ) + LOCAL( grid, NE )
+ LOCAL( grid, NW ) + LOCAL( grid, SE )
+ LOCAL( grid, SW ) + LOCAL( grid, NT )
+ LOCAL( grid, NB ) + LOCAL( grid, ST )
+ LOCAL( grid, SB ) + LOCAL( grid, ET )
+ LOCAL( grid, EB ) + LOCAL( grid, WT )
+ LOCAL( grid, WB );
if( rho < minRho ) minRho = rho;
if( rho > maxRho ) maxRho = rho;
mass += rho;
if( TEST_FLAG_SWEEP( grid, OBSTACLE )) {
nObstacleCells++;
}
else {
if( TEST_FLAG_SWEEP( grid, ACCEL ))
nAccelCells++;
else
nFluidCells++;
ux = + LOCAL( grid, E ) - LOCAL( grid, W )
+ LOCAL( grid, NE ) - LOCAL( grid, NW )
+ LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, ET ) + LOCAL( grid, EB )
- LOCAL( grid, WT ) - LOCAL( grid, WB );
uy = + LOCAL( grid, N ) - LOCAL( grid, S )
+ LOCAL( grid, NE ) + LOCAL( grid, NW )
- LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, NT ) + LOCAL( grid, NB )
- LOCAL( grid, ST ) - LOCAL( grid, SB );
uz = + LOCAL( grid, T ) - LOCAL( grid, B )
+ LOCAL( grid, NT ) - LOCAL( grid, NB )
+ LOCAL( grid, ST ) - LOCAL( grid, SB )
+ LOCAL( grid, ET ) - LOCAL( grid, EB )
+ LOCAL( grid, WT ) - LOCAL( grid, WB );
u2 = (ux*ux + uy*uy + uz*uz) / (rho*rho);
if( u2 < minU2 ) minU2 = u2;
if( u2 > maxU2 ) maxU2 = u2;
}
SWEEP_END
printf( "LBM_showGridStatistics:\n"
"\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\n"
"\tminRho: %8.4f maxRho: %8.4f mass: %e\n"
"\tminU: %e maxU: %e\n\n",
nObstacleCells, nAccelCells, nFluidCells,
minRho, maxRho, mass,
sqrt( minU2 ), sqrt( maxU2 ) );
}
/*############################################################################*/
static void storeValue( FILE* file, OUTPUT_PRECISION* v ) {
const int litteBigEndianTest = 1;
if( (*((unsigned char*) &litteBigEndianTest)) == 0 ) { /* big endian */
const char* vPtr = (char*) v;
char buffer[sizeof( OUTPUT_PRECISION )];
unsigned i;
for (i = 0; i < sizeof( OUTPUT_PRECISION ); i++)
buffer[i] = vPtr[sizeof( OUTPUT_PRECISION ) - i - 1];
fwrite( buffer, sizeof( OUTPUT_PRECISION ), 1, file );
}
else { /* little endian */
fwrite( v, sizeof( OUTPUT_PRECISION ), 1, file );
}
}
/*############################################################################*/
void LBM_storeVelocityField( LBM_Grid grid, const char* filename,
const int binary ) {
OUTPUT_PRECISION rho, ux, uy, uz;
FILE* file = fopen( filename, (binary ? "wb" : "w") );
SWEEP_VAR
SWEEP_START(0,0,0,SIZE_X,SIZE_Y,SIZE_Z)
rho = + SRC_C( grid ) + SRC_N( grid )
+ SRC_S( grid ) + SRC_E( grid )
+ SRC_W( grid ) + SRC_T( grid )
+ SRC_B( grid ) + SRC_NE( grid )
+ SRC_NW( grid ) + SRC_SE( grid )
+ SRC_SW( grid ) + SRC_NT( grid )
+ SRC_NB( grid ) + SRC_ST( grid )
+ SRC_SB( grid ) + SRC_ET( grid )
+ SRC_EB( grid ) + SRC_WT( grid )
+ SRC_WB( grid );
ux = + SRC_E( grid ) - SRC_W( grid )
+ SRC_NE( grid ) - SRC_NW( grid )
+ SRC_SE( grid ) - SRC_SW( grid )
+ SRC_ET( grid ) + SRC_EB( grid )
- SRC_WT( grid ) - SRC_WB( grid );
uy = + SRC_N( grid ) - SRC_S( grid )
+ SRC_NE( grid ) + SRC_NW( grid )
- SRC_SE( grid ) - SRC_SW( grid )
+ SRC_NT( grid ) + SRC_NB( grid )
- SRC_ST( grid ) - SRC_SB( grid );
uz = + SRC_T( grid ) - SRC_B( grid )
+ SRC_NT( grid ) - SRC_NB( grid )
+ SRC_ST( grid ) - SRC_SB( grid )
+ SRC_ET( grid ) - SRC_EB( grid )
+ SRC_WT( grid ) - SRC_WB( grid );
ux /= rho;
uy /= rho;
uz /= rho;
if( binary ) {
/*
fwrite( &ux, sizeof( ux ), 1, file );
fwrite( &uy, sizeof( uy ), 1, file );
fwrite( &uz, sizeof( uz ), 1, file );
*/
storeValue( file, &ux );
storeValue( file, &uy );
storeValue( file, &uz );
} else
fprintf( file, "%e %e %e\n", ux, uy, uz );
SWEEP_END;
fclose( file );
}
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*############################################################################*/
#include "main.h"
/*############################################################################*/
static LBM_Grid CUDA_srcGrid, CUDA_dstGrid;
/*############################################################################*/
//struct pb_TimerSet timers;
int main( int nArgs, char* arg[] ) {
MAIN_Param param;
int t;
//pb_InitializeTimerSet(&timers);
//struct pb_Parameters* params;
//params = pb_ReadParameters(&nArgs, arg);
/* Initialize the parameters structure */
struct pb_Parameters *params = (struct pb_Parameters *)malloc(sizeof(struct pb_Parameters));
params->outFile = NULL;
params->inpFiles = (char **)malloc(sizeof(char *));
params->inpFiles[0] = NULL;
// Read input from command line
#ifdef SIZE0
params->inpFiles[0] = "~/software/parboil-2.5/datasets/lbm/short/input/120_120_150_ldc.of";
#endif
#ifdef SIZE1
params->inpFiles[0] = "~/software/parboil-2.5/datasets/lbm/long/input/120_120_150_ldc.of";
#endif
static LBM_GridPtr TEMP_srcGrid;
//Setup TEMP datastructures
LBM_allocateGrid( (float**) &TEMP_srcGrid );
MAIN_parseCommandLine( nArgs, arg, ¶m, params );
MAIN_printInfo( ¶m );
MAIN_initialize( ¶m );
for( t = 1; t <= param.nTimeSteps; t++ ) {
//pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
CUDA_LBM_performStreamCollide( CUDA_srcGrid, CUDA_dstGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
LBM_swapGrids( &CUDA_srcGrid, &CUDA_dstGrid );
//if( (t & 63) == 0 ) {
printf( "timestep: %i\n", t );
}
MAIN_finalize( ¶m );
LBM_freeGrid( (float**) &TEMP_srcGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_NONE);
//pb_PrintTimerSet(&timers);
// pb_FreeParameters(params);
return 0;
}
/*############################################################################*/
void MAIN_parseCommandLine( int nArgs, char* arg[], MAIN_Param* param, struct pb_Parameters * params ) {
struct stat fileStat;
/*
if( nArgs < 2 ) {
printf( "syntax: lbm <time steps>\n" );
exit( 1 );
}
*/
param->nTimeSteps = ITERS;//atoi( arg[1] );
if( params->inpFiles[0] != NULL ) {
param->obstacleFilename = params->inpFiles[0];
if( stat( param->obstacleFilename, &fileStat ) != 0 ) {
printf( "MAIN_parseCommandLine: cannot stat obstacle file '%s'\n",
param->obstacleFilename );
exit( 1 );
}
if( fileStat.st_size != SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z ) {
printf( "MAIN_parseCommandLine:\n"
"\tsize of file '%s' is %i bytes\n"
"\texpected size is %i bytes\n",
param->obstacleFilename, (int) fileStat.st_size,
SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z );
exit( 1 );
}
}
else param->obstacleFilename = NULL;
param->resultFilename = params->outFile;
}
/*############################################################################*/
void MAIN_printInfo( const MAIN_Param* param ) {
printf( "MAIN_printInfo:\n"
"\tgrid size : %i x %i x %i = %.2f * 10^6 Cells\n"
"\tnTimeSteps : %i\n"
"\tresult file : %s\n"
"\taction : %s\n"
"\tsimulation type: %s\n"
"\tobstacle file : %s\n\n",
SIZE_X, SIZE_Y, SIZE_Z, 1e-6*SIZE_X*SIZE_Y*SIZE_Z,
param->nTimeSteps, param->resultFilename,
"store", "lid-driven cavity",
(param->obstacleFilename == NULL) ? "<none>" :
param->obstacleFilename );
}
/*############################################################################*/
void MAIN_initialize( const MAIN_Param* param ) {
static LBM_Grid TEMP_srcGrid, TEMP_dstGrid;
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
//Setup TEMP datastructures
LBM_allocateGrid( (float**) &TEMP_srcGrid );
LBM_allocateGrid( (float**) &TEMP_dstGrid );
LBM_initializeGrid( TEMP_srcGrid );
LBM_initializeGrid( TEMP_dstGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_IO);
if( param->obstacleFilename != NULL ) {
LBM_loadObstacleFile( TEMP_srcGrid, param->obstacleFilename );
LBM_loadObstacleFile( TEMP_dstGrid, param->obstacleFilename );
}
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
LBM_initializeSpecialCellsForLDC( TEMP_srcGrid );
LBM_initializeSpecialCellsForLDC( TEMP_dstGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
//Setup DEVICE datastructures
CUDA_LBM_allocateGrid( (float**) &CUDA_srcGrid );
CUDA_LBM_allocateGrid( (float**) &CUDA_dstGrid );
//Initialize DEVICE datastructures
CUDA_LBM_initializeGrid( (float**)&CUDA_srcGrid, (float**)&TEMP_srcGrid );
CUDA_LBM_initializeGrid( (float**)&CUDA_dstGrid, (float**)&TEMP_dstGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
LBM_showGridStatistics( TEMP_srcGrid );
LBM_freeGrid( (float**) &TEMP_srcGrid );
LBM_freeGrid( (float**) &TEMP_dstGrid );
}
/*############################################################################*/
void MAIN_finalize( const MAIN_Param* param ) {
LBM_Grid TEMP_srcGrid;
//Setup TEMP datastructures
LBM_allocateGrid( (float**) &TEMP_srcGrid );
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
CUDA_LBM_getDeviceGrid((float**)&CUDA_srcGrid, (float**)&TEMP_srcGrid);
// pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
LBM_showGridStatistics( TEMP_srcGrid );
//LBM_storeVelocityField( TEMP_srcGrid, param->resultFilename, TRUE );
LBM_freeGrid( (float**) &TEMP_srcGrid );
CUDA_LBM_freeGrid( (float**) &CUDA_srcGrid );
CUDA_LBM_freeGrid( (float**) &CUDA_dstGrid );
}
|
22c4cf57ea89ad64080a00db7c8d5dd810c6bbf9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Lines.h"
#include "input.h"
#include "Random.h"
#include "hip/hip_runtime.h"
#include "host_defines.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
extern vector<struct Line> originalConstraints;
extern struct Vertex Solution;
extern int randomSeed;
// Intersection vertex
bool Intersection(struct Line *l1, struct Line *l2, struct Vertex *v1)
{
if (fabs(l1->a1 * l2->a2 - l2->a1 * l1->a2) < EQUAL_NUM)
{
v1 = NULL;
return false;
}
v1->x = -(l1->b * l2->a2 - l2->b * l1->a2) / (l1->a1 * l2->a2 - l2->a1 * l1->a2);
v1->y = (l1->b * l2->a1 - l2->b * l1->a1) / (l1->a2 * l2->a1 - l1->a1 * l2->a2);
//printf("Intersection: (%lf, %lf)\n", v1->x, v1->y);
return true;
}
void Slope(struct Line *l)
{
if (fabs(l->a2 - 0.0) < EQUAL_NUM)
{
if ((l->a1 > 0 && l->a2 < 0) || (l->a1 < 0 && l->a2 > 0))
{
l->lslope = FLT_MAX;
}
else if ((l->a1 < 0 && l->a2 < 0) || (l->a1 > 0 && l->a2 > 0))
{
l->lslope = -FLT_MAX;
}
else
{
l->lslope = -l->a1 / l->a2;
}
return;
}
l->lslope = -l->a1 / l->a2;
return;
}
// Slope line
__device__ void SlopeDevice(struct Line *l)
{
if (fabs(l->a2 - 0.0) < EQUAL_NUM)
{
if ((l->a1 > 0 && l->a2 < 0) || (l->a1 < 0 && l->a2 > 0))
{
l->lslope = FLT_MAX;
}
else if ((l->a1 < 0 && l->a2 < 0) || (l->a1 > 0 && l->a2 > 0))
{
l->lslope = -FLT_MAX;
}
else
{
l->lslope = -l->a1 / l->a2;
}
return;
}
l->lslope = -l->a1 / l->a2;
return;
}
// Compare
int cmp(const void *a, const void *b)
{
struct Line *aa = (struct Line *)a;
struct Line *bb = (struct Line *)b;
return ((aa->lslope > bb->lslope) ? 1 : -1);
}
// Rotation
__global__ void kRotation(struct Line oConstraints[], struct Line lines[], struct Objfunc *object, int *index, int *numG, int *numH)
{
//__shared__ int numGtemp;
//__shared__ int numHtemp;
double thetaArc, thetaDec;
if (object->c2 == 0 && object->c1 > 0) {
thetaArc = -PI / 2;
thetaDec = -90;
}
else if (object->c2 == 0 && object->c1 < 0) {
thetaArc = PI / 2;
thetaDec = 90;
}
else {
thetaArc = atan(-object->c1 / object->c2);
thetaDec = atan(-object->c1 / object->c2) * 180 / PI;
}
int i;
double a1Temp, a2Temp, bTemp;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
//printf("offset: %d\n", offset);
if (offset < (*index)) {
a1Temp = oConstraints[offset].a1;
a2Temp = oConstraints[offset].a2;
bTemp = oConstraints[offset].b;
lines[offset].a1 = cos(thetaArc) * a1Temp + sin(thetaArc) * a2Temp;
lines[offset].a2 = cos(thetaArc) * a2Temp - sin(thetaArc) * a1Temp;
lines[offset].b = bTemp;
lines[offset].index = offset;
//printf("%lf\n", lines[offset].a2);
if (lines[offset].a2 > 0) {
//__threadfence();
atomicAdd(numG, 1);
//(*numG)++;
//printf("%d", (*numG));
}
else if (lines[offset].a2 < 0) {
//__threadfence();
atomicAdd(numH, 1);
//(*numH)++;
//printf("%d", (*numH));
}
else {
return;
}
SlopeDevice(&lines[offset]);
lines[offset].beingUsed = true;
}
//__threadfence();
__syncthreads();
__threadfence();
/*
if (offset == 2) {
(*numG) = numGtemp;
(*numH) = numHtemp;
}*/
return;
/*
for (i = 0; i < (*index); i += 1) {
a1Temp = oConstraints[i].a1;
a2Temp = oConstraints[i].a2;
bTemp = oConstraints[i].b;
lines[i].a1 = cos(thetaArc) * a1Temp + sin(thetaArc) * a2Temp;
lines[i].a2 = cos(thetaArc) * a2Temp - sin(thetaArc) * a1Temp;
lines[i].b = bTemp;
lines[i].index = i;
if (lines[i].a2 > 0) {
(*numG)++;
//printf("%d", (*numG));
}
else if (lines[i].a2 < 0) {
(*numH)++;
//printf("%d", (*numH));
}
else {
(*ret) = false;
return;
}
Slope(&lines[i]);
lines[i].beingUsed = true;
}
if ((*numG) + (*numH) != (*index)) {
printf("Fatal Error at Rotation()!\n");
exit(-1);
}
*/
return;
}
// Separation - O(n)
bool Separation(struct Line I1[], struct Line I2[], struct Line lines[], int numG, int numH)
{
int index = numG + numH;
int i, g = 0, h = 0;
for (i = 0; i < index; i++) {
if (lines[i].a2 > 0) {
I1[g].a1 = -lines[i].a1 / lines[i].a2;
I1[g].a2 = 1;
I1[g].b = lines[i].b / lines[i].a2;
Slope(&I1[g]);
I1[g].lslope = -I1[g].lslope;
I1[g].beingUsed = true;
I1[g].index = lines[i].index;
//cout << I1[g].index << "\n";
g++;
}
else if (lines[i].a2 < 0) {
I2[h].a1 = -lines[i].a1 / lines[i].a2;
I2[h].a2 = 1;
I2[h].b = lines[i].b / lines[i].a2;
Slope(&I2[h]);
I2[h].lslope = -I2[h].lslope;
I2[h].beingUsed = true;
I2[h].index = lines[i].index;
//cout << I2[h].index << "\n";
h++;
}
else {
printf("%d %lf\n", i, lines[i].a2);
return false;
}
}
return true;
}
// Make pairs
bool MakePairs(struct Line I1[], struct Line I2[],
struct Pair pairsG[], struct Pair pairsH[],
int numG, int numH, int *index,
double leftBound, double rightBound)
{
int g, gtemp;
(*index) = 0;
for (g = 0; g < numG; g += 1) {
// drop
if (I1[g].beingUsed == false) {
continue;
}
for (gtemp = g + 1; gtemp < numG; gtemp++) {
if (I1[gtemp].beingUsed == true) {
break;
}
}
if (gtemp == numG) break;
if (fabs(I1[g].lslope - I1[gtemp].lslope) < EQUAL_NUM) {
if (I1[g].b > I1[gtemp].b) {
I1[gtemp].beingUsed = false;
g = g - 1;
}
else {
I1[g].beingUsed = false;
g = gtemp - 1;
}
continue;
}
struct Vertex *p = (struct Vertex *)malloc(sizeof(struct Vertex));
Intersection(&I1[g], &I1[gtemp], p);
if (p->x < leftBound || p->x > rightBound) {
if (p->x < leftBound && (I1[g].lslope > I1[gtemp].lslope)) {
I1[gtemp].beingUsed = false;
g = g - 1;
}
else if (p->x < leftBound && (I1[g].lslope < I1[gtemp].lslope)) {
I1[g].beingUsed = false;
g = gtemp - 1;
}
else if (p->x > rightBound && (I1[g].lslope < I1[gtemp].lslope)) {
I1[gtemp].beingUsed = false;
g = g - 1;
}
else if (p->x > rightBound && (I1[g].lslope > I1[gtemp].lslope)) {
I1[g].beingUsed = false;
g = gtemp - 1;
}
continue;
}
pairsG[(*index)].index = (*index);
pairsG[(*index)].line1 = I1[g];
pairsG[(*index)].index1 = g;
pairsG[(*index)].line2 = I1[gtemp];
pairsG[(*index)].index2 = gtemp;
pairsG[(*index)].point.x = p->x; pairsG[(*index)].point.y = p->y;
//cout << g << " " << gtemp << '\n';
// printf("Intersection2: (%lf, %lf)\n", p->x, p->y);
//printf("Value: %lf, %lf\n", I1[g].a1 * p->x + I1[g].b, I1[gtemp].a1 * p->x + I1[gtemp].b);
(*index)++;
g++;
}
/*
for (h = 0; h < numH; h += 2) {
// drop
pairsH[h / 2].index = *index++;
pairsH[h / 2].line1 = I2[h];
pairsH[h / 2].line2 = I2[h + 1];
Intersection(&I2[h], &I2[h + 1], &pairsH[h / 2].point);
}*/
return true;
}
// sg, Sg, sh, Sh
struct Vertex *TestingLine(struct Pair pairsG[], struct Pair pairsH[],
struct Line I1[], struct Line I2[],
int numG, int numH, int numDot,
double *leftBound, double *rightBound)
{
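// Tests one candidate x' (a randomly chosen pairsG intersection): compares
// g(x') = max over the active I1 lines with h(x') = min over the active I2 lines,
// then either returns the optimum, or shrinks [leftBound, rightBound] and marks
// one line of the tested pair as unused. When no pairs are left (numDot == 0),
// the solution is read off directly from the remaining lines.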
/*
for (int i = 0; i < numG; i++) {
cout << "Line " << i << " : " << I1[i].beingUsed << endl;
}*/
// Randomly choose a point
//cout << "Testing Line" << endl;
int index = (numDot == 0) ? 0 : (getRandomInt(&randomSeed, numDot));
//printf("%d %d\n", index, numDot);
if (numDot == 0) {
int onlyOne = 0;
bool isFeasible = false;
struct Vertex *vSln = (struct Vertex *)malloc(sizeof(struct Vertex));
vSln->y = -FLT_MAX;
for (onlyOne = 0; onlyOne < numG; onlyOne++) {
if (I1[onlyOne].beingUsed == true) {
isFeasible = true;
break;
}
}
if (isFeasible == true && numH != 0) {
struct Vertex *vTemp = (struct Vertex *)malloc(sizeof(struct Vertex));
for (int i = 0; i < numH; i++) {
Intersection(&(I1[onlyOne]), &(I2[i]), vTemp);
if (vSln->y < vTemp->y) {
vSln->x = vTemp->x;
vSln->y = vTemp->y;
}
}
printf("sln: %lf %lf\n", vSln->x, vSln->y);
return vSln;
}
else {
/*
for (int i = 0; i < numG; i++) {
cout << "beingUsed: " << I1[i].beingUsed << endl;
}*/
cout << "No solution!\n";
exit(0);
}
}
//int index = round ? 1 : 0;
double xPrimeG = pairsG[index].point.x; // x' - xPrime
double yPrimeG = pairsG[index].point.y;
double yPrimeH;
//cout << xPrimeG << '\n';
// struct Line *sg = (&pairsG[index].line1.a1 < &pairsG[index].line2.a1) ? &pairsG[index].line1 : &pairsG[index].line2;
// struct Line *Sg = (&pairsG[index].line1.a1 >= &pairsG[index].line2.a1) ? &pairsG[index].line1 : &pairsG[index].line2;
struct Line *sg = NULL;
struct Line *Sg = NULL;
struct Line *sh = NULL;
struct Line *Sh = NULL;
// struct Line *sh = (&pairsH[index].line1.a1 < &pairsH[index].line2.a1) ? &pairsH[index].line1 : &pairsH[index].line2;
// struct Line *Sh = (&pairsH[index].line1.a1 < &pairsH[index].line2.a1) ? &pairsH[index].line1 : &pairsH[index].line2;
vector<int> linesG;
vector<int> linesH;
// Finding g(x') and H(x')
for (int i = 0; i < numG; i++) {
if (I1[i].beingUsed == true) {
if ((fabs(yPrimeG - (I1[i].a1 * xPrimeG + I1[i].b)) > EQUAL_NUM && yPrimeG < (I1[i].a1 * xPrimeG + I1[i].b)) || (sg == NULL || Sg == NULL)) {
//printf("xPrime yPrime ???: %lf %lf %lf\n", xPrimeG, yPrimeG, (I1[i].a1 * xPrimeG + I1[i].b));
yPrimeG = I1[i].a1 * xPrimeG + I1[i].b;
sg = &I1[i];
Sg = &I1[i];
}
}
}
for (int i = 0; i < numH; i++) {
if (I2[i].beingUsed == true) {
if (sh == NULL || Sh == NULL) {
sh = &I2[i];
Sh = &I2[i];
yPrimeH = I2[i].a1 * xPrimeG + I2[i].b;
}
else if (fabs(yPrimeH - (I2[i].a1 * xPrimeG + I2[i].b)) > EQUAL_NUM && yPrimeH > (I2[i].a1 * xPrimeG + I2[i].b)) {
yPrimeH = I2[i].a1 * xPrimeG + I2[i].b;
sh = &I2[i];
Sh = &I2[i];
}
}
}
if (numH == 0) {
yPrimeH = yPrimeG + 1000.0;
}
// Finding sg - min g(x') && Finding Sg - max g(x')
/*
struct Line *sg = &pairsG[0].line1;
struct Line *Sg = &pairsG[0].line1;
struct Line *sh = &pairsH[0].line1;
struct Line *Sh = &pairsH[0].line1;
*/
for (int i = 0; i < numG; i++) {
double currentLineValueG = I1[i].a1 * xPrimeG + I1[i].b;
if (I1[i].beingUsed == false || fabs(currentLineValueG - yPrimeG) >= EQUAL_NUM) {
continue;
}
if (I1[i].a1 < sg->a1) {
sg = &I1[i];
}
if (I1[i].a1 > Sg->a1) {
Sg = &I1[i];
}
}
// Finding sh - min h(x') && Finding Sh - max h(x')
for (int i = 0; i < numH; i++) {
double currentLineValueH = I2[i].a1 * xPrimeG + I2[i].b;
if (I2[i].beingUsed == false || fabs(currentLineValueH - yPrimeH) >= EQUAL_NUM) {
continue;
}
if (I2[i].a1 < sh->a1) {
sh = &I2[i];
}
if (I2[i].a1 > Sh->a1) {
Sh = &I2[i];
}
}
// Is feasible
if (fabs(yPrimeG - yPrimeH) < 1e-6) {
if (sg->a1 > 0 && sg->a1 >= Sh->a1) {
// x* < x'
if (sh != Sh) {
sh->beingUsed = false;
}
if (sg != Sg) {
Sg->beingUsed = false;
}
*rightBound = xPrimeG;
//cout << "cccccccccc\n";
return NULL;
}
else if (Sg->a1 < 0 && Sg->a1 <= sh->a1) {
// x* > x'
if (sh != Sh) {
Sh->beingUsed = false;
}
if (sg != Sg) {
sg->beingUsed = false;
}
*leftBound = xPrimeG;
//cout << "dddddddddddddd\n";
return NULL;
}
else {
// x* = x'
Solution.x = xPrimeG;
Solution.y = yPrimeG;
//cout << "gggggggggggggggggg\n";
return &(Solution);
}
}
else if (yPrimeG > yPrimeH) { // infeasible
if (sg->a1 > Sh->a1) {
// x* < x'
if (sh != Sh) {
sh->beingUsed = false;
}
if (sg != Sg) {
Sg->beingUsed = false;
}
else {
if (pairsG[index].line1.a1 < pairsG[index].line2.a1) {
//pairsG[index].line2.beingUsed = false;
I1[pairsG[index].index2].beingUsed = false;
}
else if (pairsG[index].line1.a1 > pairsG[index].line2.a1) {
//pairsG[index].line1.beingUsed = false;
I1[pairsG[index].index1].beingUsed = false;
}
}
*rightBound = xPrimeG;
/*
printf("aaaaaaaaaaa %lf %lf %lf\n", xPrimeG, yPrimeG, yPrimeH);
cout << sh->a1 << " " << sh->a1 * xPrimeG + sh->b << " " << originalConstraints[sh->index].a1 << '\n';
cout << Sh->a1 << " " << Sh->a1 * xPrimeG + Sh->b << " " << originalConstraints[Sh->index].a1 << '\n';
*/
return NULL;
}
else if (Sg->a1 < sh->a1) {
// x* > x'
if (sh != Sh) {
Sh->beingUsed = false;
}
if (sg != Sg) {
sg->beingUsed = false;
}
else {
if (pairsG[index].line1.a1 < pairsG[index].line2.a1) {
//pairsG[index].line1.beingUsed = false;
I1[pairsG[index].index1].beingUsed = false;
}
else if (pairsG[index].line1.a1 > pairsG[index].line2.a1) {
//pairsG[index].line2.beingUsed = false;
I1[pairsG[index].index2].beingUsed = false;
}
}
*leftBound = xPrimeG;
//printf("bbbbbbbbbbbbbbb\n");
return NULL;
}
else if ((sg->a1 - Sh->a1) <= 0 && 0 <= (Sg->a1 - sh->a1)) {
// no feasible
printf("No feasible solution!\n");
exit(0);
return NULL;
}
}
else if (yPrimeG < yPrimeH) { // feasible
if (sg->a1 > 0) {
// x* < x'
if (sg != Sg) {
Sg->beingUsed = false;
}
else {
if (pairsG[index].line1.a1 < pairsG[index].line2.a1) {
//pairsG[index].line2.beingUsed = false;
I1[pairsG[index].index2].beingUsed = false;
}
else if (pairsG[index].line1.a1 > pairsG[index].line2.a1) {
//pairsG[index].line1.beingUsed = false;
I1[pairsG[index].index1].beingUsed = false;
}
}
*rightBound = xPrimeG;
//cout << "eeeeeeeeeeeeeeeee\n";
return NULL;
}
else if (Sg->a1 < 0) {
// x* > x'
if (sg != Sg) {
sg->beingUsed = false;
}
else {
if (pairsG[index].line1.a1 < pairsG[index].line2.a1) {
//pairsG[index].line1.beingUsed = false;
I1[pairsG[index].index1].beingUsed = false;
}
else if (pairsG[index].line1.a1 > pairsG[index].line2.a1) {
//pairsG[index].line2.beingUsed = false;
I1[pairsG[index].index2].beingUsed = false;
}
}
*leftBound = xPrimeG;
//cout << "fffffffffffff\n";
return NULL;
}
else if (sg->a1 <= 0 && 0 <= Sg->a1) {
// x* = x'
Solution.x = xPrimeG;
Solution.y = yPrimeG;
//cout << "hhhhhhhhhhhhhh\n";
return &(Solution);
}
}
return NULL;
}
///////////////////////////////////////////////////////////////////////////////////
static void HandleError(hipError_t err,
const char *file,
int line) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
bool Rotation(struct Line lines[], struct Objfunc object, int index, int *numG, int *numH)
{
bool ret;
// Original Constraints
struct Line *dev_oConstraints;
unsigned int size = index * sizeof(struct Line);
HANDLE_ERROR(hipMalloc((void**)&dev_oConstraints, size));
// Lines after rotation
struct Line *dev_lines;
HANDLE_ERROR(hipMalloc((void**)&dev_lines, size));
// Objective function
struct Objfunc *dev_object;
HANDLE_ERROR(hipMalloc((void**)&dev_object, sizeof(struct Objfunc)));
// Number of lines
int *dev_index;
HANDLE_ERROR(hipMalloc((void**)&dev_index, sizeof(int)));
// Num of G lines
int *dev_numG;
HANDLE_ERROR(hipMalloc((void**)&dev_numG, sizeof(int)));
// Num of H lines
int *dev_numH;
HANDLE_ERROR(hipMalloc((void**)&dev_numH, sizeof(int)));
// Space distribution
unsigned int DIM = 1 + sqrt(index) / 16;
dim3 blocks(DIM, DIM);
dim3 threads(16, 16);
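// DIM x DIM blocks of 16x16 threads: (DIM*16)^2 >= index, so there is at least one thread per constraint line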
(*numG) = (*numH) = 0;
float time_elapsed = 0;
hipEvent_t start, stop;
hipEventCreate(&start); //create the event
hipEventCreate(&stop);
hipEventRecord(start, 0); //record the current time
hipEventRecord(stop, 0); //record the current time
hipEventSynchronize(start); //Waits for an event to complete.
hipEventSynchronize(stop); //Waits for an event to complete, i.e. all work issued before the Record call
hipEventElapsedTime(&time_elapsed, start, stop); //compute the time difference
hipEventDestroy(start); //destroy the event
hipEventDestroy(stop);
printf("Elapsed time: %f(ms)\n", time_elapsed);
// Copy from CPU to GPU
HANDLE_ERROR(hipMemcpy(dev_oConstraints, &originalConstraints[0], size, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_object, &object, sizeof(struct Objfunc), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_index, &index, sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_numG, numG, sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_numH, numH, sizeof(int), hipMemcpyHostToDevice));
// Kernel function <<<blocks, threads>>>
kRotation << <blocks, threads >> >(dev_oConstraints, dev_lines, dev_object, dev_index, dev_numG, dev_numH);
// Copy from GPU to CPU
HANDLE_ERROR(hipMemcpy(numG, dev_numG, sizeof(int), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(numH, dev_numH, sizeof(int), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(lines, dev_lines, size, hipMemcpyDeviceToHost));
printf("%d %d\n", (*numG), (*numH));
if ((*numH) + (*numG) != index) {
ret = false;
}
else {
ret = true;
}
return ret;
}
|
22c4cf57ea89ad64080a00db7c8d5dd810c6bbf9.cu
|
#include "Lines.h"
#include "input.h"
#include "Random.h"
#include "cuda.h"
#include "host_defines.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
extern vector<struct Line> originalConstraints;
extern struct Vertex Solution;
extern int randomSeed;
// Intersection vertex
bool Intersection(struct Line *l1, struct Line *l2, struct Vertex *v1)
{
if (fabs(l1->a1 * l2->a2 - l2->a1 * l1->a2) < EQUAL_NUM)
{
v1 = NULL;
return false;
}
v1->x = -(l1->b * l2->a2 - l2->b * l1->a2) / (l1->a1 * l2->a2 - l2->a1 * l1->a2);
v1->y = (l1->b * l2->a1 - l2->b * l1->a1) / (l1->a2 * l2->a1 - l1->a1 * l2->a2);
//printf("Intersection: (%lf, %lf)\n", v1->x, v1->y);
return true;
}
void Slope(struct Line *l)
{
if (fabs(l->a2 - 0.0) < EQUAL_NUM)
{
if ((l->a1 > 0 && l->a2 < 0) || (l->a1 < 0 && l->a2 > 0))
{
l->lslope = FLT_MAX;
}
else if ((l->a1 < 0 && l->a2 < 0) || (l->a1 > 0 && l->a2 > 0))
{
l->lslope = -FLT_MAX;
}
else
{
l->lslope = -l->a1 / l->a2;
}
return;
}
l->lslope = -l->a1 / l->a2;
return;
}
// Slope line
__device__ void SlopeDevice(struct Line *l)
{
if (fabs(l->a2 - 0.0) < EQUAL_NUM)
{
if ((l->a1 > 0 && l->a2 < 0) || (l->a1 < 0 && l->a2 > 0))
{
l->lslope = FLT_MAX;
}
else if ((l->a1 < 0 && l->a2 < 0) || (l->a1 > 0 && l->a2 > 0))
{
l->lslope = -FLT_MAX;
}
else
{
l->lslope = -l->a1 / l->a2;
}
return;
}
l->lslope = -l->a1 / l->a2;
return;
}
// Compare
int cmp(const void *a, const void *b)
{
struct Line *aa = (struct Line *)a;
struct Line *bb = (struct Line *)b;
return ((aa->lslope > bb->lslope) ? 1 : -1);
}
// Rotation
__global__ void kRotation(struct Line oConstraints[], struct Line lines[], struct Objfunc *object, int *index, int *numG, int *numH)
{
//__shared__ int numGtemp;
//__shared__ int numHtemp;
double thetaArc, thetaDec;
if (object->c2 == 0 && object->c1 > 0) {
thetaArc = -PI / 2;
thetaDec = -90;
}
else if (object->c2 == 0 && object->c1 < 0) {
thetaArc = PI / 2;
thetaDec = 90;
}
else {
thetaArc = atan(-object->c1 / object->c2);
thetaDec = atan(-object->c1 / object->c2) * 180 / PI;
}
int i;
double a1Temp, a2Temp, bTemp;
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
//printf("offset: %d\n", offset);
if (offset < (*index)) {
a1Temp = oConstraints[offset].a1;
a2Temp = oConstraints[offset].a2;
bTemp = oConstraints[offset].b;
lines[offset].a1 = cos(thetaArc) * a1Temp + sin(thetaArc) * a2Temp;
lines[offset].a2 = cos(thetaArc) * a2Temp - sin(thetaArc) * a1Temp;
lines[offset].b = bTemp;
lines[offset].index = offset;
//printf("%lf\n", lines[offset].a2);
if (lines[offset].a2 > 0) {
//__threadfence();
atomicAdd(numG, 1);
//(*numG)++;
//printf("%d", (*numG));
}
else if (lines[offset].a2 < 0) {
//__threadfence();
atomicAdd(numH, 1);
//(*numH)++;
//printf("%d", (*numH));
}
else {
return;
}
SlopeDevice(&lines[offset]);
lines[offset].beingUsed = true;
}
//__threadfence();
__syncthreads();
__threadfence();
/*
if (offset == 2) {
(*numG) = numGtemp;
(*numH) = numHtemp;
}*/
return;
/*
for (i = 0; i < (*index); i += 1) {
a1Temp = oConstraints[i].a1;
a2Temp = oConstraints[i].a2;
bTemp = oConstraints[i].b;
lines[i].a1 = cos(thetaArc) * a1Temp + sin(thetaArc) * a2Temp;
lines[i].a2 = cos(thetaArc) * a2Temp - sin(thetaArc) * a1Temp;
lines[i].b = bTemp;
lines[i].index = i;
if (lines[i].a2 > 0) {
(*numG)++;
//printf("%d", (*numG));
}
else if (lines[i].a2 < 0) {
(*numH)++;
//printf("%d", (*numH));
}
else {
(*ret) = false;
return;
}
Slope(&lines[i]);
lines[i].beingUsed = true;
}
if ((*numG) + (*numH) != (*index)) {
printf("Fatal Error at Rotation()!\n");
exit(-1);
}
*/
return;
}
// Separation - O(n)
bool Separation(struct Line I1[], struct Line I2[], struct Line lines[], int numG, int numH)
{
int index = numG + numH;
int i, g = 0, h = 0;
for (i = 0; i < index; i++) {
if (lines[i].a2 > 0) {
I1[g].a1 = -lines[i].a1 / lines[i].a2;
I1[g].a2 = 1;
I1[g].b = lines[i].b / lines[i].a2;
Slope(&I1[g]);
I1[g].lslope = -I1[g].lslope;
I1[g].beingUsed = true;
I1[g].index = lines[i].index;
//cout << I1[g].index << "\n";
g++;
}
else if (lines[i].a2 < 0) {
I2[h].a1 = -lines[i].a1 / lines[i].a2;
I2[h].a2 = 1;
I2[h].b = lines[i].b / lines[i].a2;
Slope(&I2[h]);
I2[h].lslope = -I2[h].lslope;
I2[h].beingUsed = true;
I2[h].index = lines[i].index;
//cout << I2[h].index << "\n";
h++;
}
else {
printf("%d %lf\n", i, lines[i].a2);
return false;
}
}
return true;
}
// Make pairs
bool MakePairs(struct Line I1[], struct Line I2[],
struct Pair pairsG[], struct Pair pairsH[],
int numG, int numH, int *index,
double leftBound, double rightBound)
{
int g, gtemp;
(*index) = 0;
for (g = 0; g < numG; g += 1) {
// drop
if (I1[g].beingUsed == false) {
continue;
}
for (gtemp = g + 1; gtemp < numG; gtemp++) {
if (I1[gtemp].beingUsed == true) {
break;
}
}
if (gtemp == numG) break;
if (fabs(I1[g].lslope - I1[gtemp].lslope) < EQUAL_NUM) {
if (I1[g].b > I1[gtemp].b) {
I1[gtemp].beingUsed = false;
g = g - 1;
}
else {
I1[g].beingUsed = false;
g = gtemp - 1;
}
continue;
}
struct Vertex *p = (struct Vertex *)malloc(sizeof(struct Vertex));
Intersection(&I1[g], &I1[gtemp], p);
if (p->x < leftBound || p->x > rightBound) {
if (p->x < leftBound && (I1[g].lslope > I1[gtemp].lslope)) {
I1[gtemp].beingUsed = false;
g = g - 1;
}
else if (p->x < leftBound && (I1[g].lslope < I1[gtemp].lslope)) {
I1[g].beingUsed = false;
g = gtemp - 1;
}
else if (p->x > rightBound && (I1[g].lslope < I1[gtemp].lslope)) {
I1[gtemp].beingUsed = false;
g = g - 1;
}
else if (p->x > rightBound && (I1[g].lslope > I1[gtemp].lslope)) {
I1[g].beingUsed = false;
g = gtemp - 1;
}
continue;
}
pairsG[(*index)].index = (*index);
pairsG[(*index)].line1 = I1[g];
pairsG[(*index)].index1 = g;
pairsG[(*index)].line2 = I1[gtemp];
pairsG[(*index)].index2 = gtemp;
pairsG[(*index)].point.x = p->x; pairsG[(*index)].point.y = p->y;
//cout << g << " " << gtemp << '\n';
// printf("Intersection2: (%lf, %lf)\n", p->x, p->y);
//printf("Value: %lf, %lf\n", I1[g].a1 * p->x + I1[g].b, I1[gtemp].a1 * p->x + I1[gtemp].b);
(*index)++;
g++;
}
/*
for (h = 0; h < numH; h += 2) {
// drop
pairsH[h / 2].index = *index++;
pairsH[h / 2].line1 = I2[h];
pairsH[h / 2].line2 = I2[h + 1];
Intersection(&I2[h], &I2[h + 1], &pairsH[h / 2].point);
}*/
return true;
}
// sg, Sg, sh, Sh
struct Vertex *TestingLine(struct Pair pairsG[], struct Pair pairsH[],
struct Line I1[], struct Line I2[],
int numG, int numH, int numDot,
double *leftBound, double *rightBound)
{
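// Tests one candidate x' (a randomly chosen pairsG intersection): compares
// g(x') = max over the active I1 lines with h(x') = min over the active I2 lines,
// then either returns the optimum, or shrinks [leftBound, rightBound] and marks
// one line of the tested pair as unused. When no pairs are left (numDot == 0),
// the solution is read off directly from the remaining lines.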
/*
for (int i = 0; i < numG; i++) {
cout << "Line " << i << " : " << I1[i].beingUsed << endl;
}*/
// Randomly choose a point
//cout << "Testing Line" << endl;
int index = (numDot == 0) ? 0 : (getRandomInt(&randomSeed, numDot));
//printf("%d %d\n", index, numDot);
if (numDot == 0) {
int onlyOne = 0;
bool isFeasible = false;
struct Vertex *vSln = (struct Vertex *)malloc(sizeof(struct Vertex));
vSln->y = -FLT_MAX;
for (onlyOne = 0; onlyOne < numG; onlyOne++) {
if (I1[onlyOne].beingUsed == true) {
isFeasible = true;
break;
}
}
if (isFeasible == true && numH != 0) {
struct Vertex *vTemp = (struct Vertex *)malloc(sizeof(struct Vertex));
for (int i = 0; i < numH; i++) {
Intersection(&(I1[onlyOne]), &(I2[i]), vTemp);
if (vSln->y < vTemp->y) {
vSln->x = vTemp->x;
vSln->y = vTemp->y;
}
}
printf("sln: %lf %lf\n", vSln->x, vSln->y);
return vSln;
}
else {
/*
for (int i = 0; i < numG; i++) {
cout << "beingUsed: " << I1[i].beingUsed << endl;
}*/
cout << "No solution!\n";
exit(0);
}
}
//int index = round ? 1 : 0;
double xPrimeG = pairsG[index].point.x; // x' - xPrime
double yPrimeG = pairsG[index].point.y;
double yPrimeH;
//cout << xPrimeG << '\n';
// struct Line *sg = (&pairsG[index].line1.a1 < &pairsG[index].line2.a1) ? &pairsG[index].line1 : &pairsG[index].line2;
// struct Line *Sg = (&pairsG[index].line1.a1 >= &pairsG[index].line2.a1) ? &pairsG[index].line1 : &pairsG[index].line2;
struct Line *sg = NULL;
struct Line *Sg = NULL;
struct Line *sh = NULL;
struct Line *Sh = NULL;
// struct Line *sh = (&pairsH[index].line1.a1 < &pairsH[index].line2.a1) ? &pairsH[index].line1 : &pairsH[index].line2;
// struct Line *Sh = (&pairsH[index].line1.a1 < &pairsH[index].line2.a1) ? &pairsH[index].line1 : &pairsH[index].line2;
vector<int> linesG;
vector<int> linesH;
// Finding g(x') and H(x')
for (int i = 0; i < numG; i++) {
if (I1[i].beingUsed == true) {
if ((fabs(yPrimeG - (I1[i].a1 * xPrimeG + I1[i].b)) > EQUAL_NUM && yPrimeG < (I1[i].a1 * xPrimeG + I1[i].b)) || (sg == NULL || Sg == NULL)) {
//printf("xPrime yPrime ???: %lf %lf %lf\n", xPrimeG, yPrimeG, (I1[i].a1 * xPrimeG + I1[i].b));
yPrimeG = I1[i].a1 * xPrimeG + I1[i].b;
sg = &I1[i];
Sg = &I1[i];
}
}
}
for (int i = 0; i < numH; i++) {
if (I2[i].beingUsed == true) {
if (sh == NULL || Sh == NULL) {
sh = &I2[i];
Sh = &I2[i];
yPrimeH = I2[i].a1 * xPrimeG + I2[i].b;
}
else if (fabs(yPrimeH - (I2[i].a1 * xPrimeG + I2[i].b)) > EQUAL_NUM && yPrimeH > (I2[i].a1 * xPrimeG + I2[i].b)) {
yPrimeH = I2[i].a1 * xPrimeG + I2[i].b;
sh = &I2[i];
Sh = &I2[i];
}
}
}
if (numH == 0) {
yPrimeH = yPrimeG + 1000.0;
}
// Finding sg - min g(x') && Finding Sg - max g(x')
/*
struct Line *sg = &pairsG[0].line1;
struct Line *Sg = &pairsG[0].line1;
struct Line *sh = &pairsH[0].line1;
struct Line *Sh = &pairsH[0].line1;
*/
for (int i = 0; i < numG; i++) {
double currentLineValueG = I1[i].a1 * xPrimeG + I1[i].b;
if (I1[i].beingUsed == false || fabs(currentLineValueG - yPrimeG) >= EQUAL_NUM) {
continue;
}
if (I1[i].a1 < sg->a1) {
sg = &I1[i];
}
if (I1[i].a1 > Sg->a1) {
Sg = &I1[i];
}
}
// Finding sh - min h(x') && Finding Sh - max h(x')
for (int i = 0; i < numH; i++) {
double currentLineValueH = I2[i].a1 * xPrimeG + I2[i].b;
if (I2[i].beingUsed == false || fabs(currentLineValueH - yPrimeH) >= EQUAL_NUM) {
continue;
}
if (I2[i].a1 < sh->a1) {
sh = &I2[i];
}
if (I2[i].a1 > Sh->a1) {
Sh = &I2[i];
}
}
// Is feasible
if (fabs(yPrimeG - yPrimeH) < 1e-6) {
if (sg->a1 > 0 && sg->a1 >= Sh->a1) {
// x* < x'
if (sh != Sh) {
sh->beingUsed = false;
}
if (sg != Sg) {
Sg->beingUsed = false;
}
*rightBound = xPrimeG;
//cout << "cccccccccc\n";
return NULL;
}
else if (Sg->a1 < 0 && Sg->a1 <= sh->a1) {
// x* > x'
if (sh != Sh) {
Sh->beingUsed = false;
}
if (sg != Sg) {
sg->beingUsed = false;
}
*leftBound = xPrimeG;
//cout << "dddddddddddddd\n";
return NULL;
}
else {
// x* = x'
Solution.x = xPrimeG;
Solution.y = yPrimeG;
//cout << "gggggggggggggggggg\n";
return &(Solution);
}
}
else if (yPrimeG > yPrimeH) { // infeasible
if (sg->a1 > Sh->a1) {
// x* < x'
if (sh != Sh) {
sh->beingUsed = false;
}
if (sg != Sg) {
Sg->beingUsed = false;
}
else {
if (pairsG[index].line1.a1 < pairsG[index].line2.a1) {
//pairsG[index].line2.beingUsed = false;
I1[pairsG[index].index2].beingUsed = false;
}
else if (pairsG[index].line1.a1 > pairsG[index].line2.a1) {
//pairsG[index].line1.beingUsed = false;
I1[pairsG[index].index1].beingUsed = false;
}
}
*rightBound = xPrimeG;
/*
printf("aaaaaaaaaaa %lf %lf %lf\n", xPrimeG, yPrimeG, yPrimeH);
cout << sh->a1 << " " << sh->a1 * xPrimeG + sh->b << " " << originalConstraints[sh->index].a1 << '\n';
cout << Sh->a1 << " " << Sh->a1 * xPrimeG + Sh->b << " " << originalConstraints[Sh->index].a1 << '\n';
*/
return NULL;
}
else if (Sg->a1 < sh->a1) {
// x* > x'
if (sh != Sh) {
Sh->beingUsed = false;
}
if (sg != Sg) {
sg->beingUsed = false;
}
else {
if (pairsG[index].line1.a1 < pairsG[index].line2.a1) {
//pairsG[index].line1.beingUsed = false;
I1[pairsG[index].index1].beingUsed = false;
}
else if (pairsG[index].line1.a1 > pairsG[index].line2.a1) {
//pairsG[index].line2.beingUsed = false;
I1[pairsG[index].index2].beingUsed = false;
}
}
*leftBound = xPrimeG;
//printf("bbbbbbbbbbbbbbb\n");
return NULL;
}
else if ((sg->a1 - Sh->a1) <= 0 && 0 <= (Sg->a1 - sh->a1)) {
// no feasible
printf("No feasible solution!\n");
exit(0);
return NULL;
}
}
else if (yPrimeG < yPrimeH) { // feasible
if (sg->a1 > 0) {
// x* < x'
if (sg != Sg) {
Sg->beingUsed = false;
}
else {
if (pairsG[index].line1.a1 < pairsG[index].line2.a1) {
//pairsG[index].line2.beingUsed = false;
I1[pairsG[index].index2].beingUsed = false;
}
else if (pairsG[index].line1.a1 > pairsG[index].line2.a1) {
//pairsG[index].line1.beingUsed = false;
I1[pairsG[index].index1].beingUsed = false;
}
}
*rightBound = xPrimeG;
//cout << "eeeeeeeeeeeeeeeee\n";
return NULL;
}
else if (Sg->a1 < 0) {
// x* > x'
if (sg != Sg) {
sg->beingUsed = false;
}
else {
if (pairsG[index].line1.a1 < pairsG[index].line2.a1) {
//pairsG[index].line1.beingUsed = false;
I1[pairsG[index].index1].beingUsed = false;
}
else if (pairsG[index].line1.a1 > pairsG[index].line2.a1) {
//pairsG[index].line2.beingUsed = false;
I1[pairsG[index].index2].beingUsed = false;
}
}
*leftBound = xPrimeG;
//cout << "fffffffffffff\n";
return NULL;
}
else if (sg->a1 <= 0 && 0 <= Sg->a1) {
// x* = x'
Solution.x = xPrimeG;
Solution.y = yPrimeG;
//cout << "hhhhhhhhhhhhhh\n";
return &(Solution);
}
}
return NULL;
}
///////////////////////////////////////////////////////////////////////////////////
static void HandleError(cudaError_t err,
const char *file,
int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
bool Rotation(struct Line lines[], struct Objfunc object, int index, int *numG, int *numH)
{
bool ret;
// Original Constraints
struct Line *dev_oConstraints;
unsigned int size = index * sizeof(struct Line);
HANDLE_ERROR(cudaMalloc((void**)&dev_oConstraints, size));
// Lines after rotation
struct Line *dev_lines;
HANDLE_ERROR(cudaMalloc((void**)&dev_lines, size));
// Objective function
struct Objfunc *dev_object;
HANDLE_ERROR(cudaMalloc((void**)&dev_object, sizeof(struct Objfunc)));
// Number of lines
int *dev_index;
HANDLE_ERROR(cudaMalloc((void**)&dev_index, sizeof(int)));
// Num of G lines
int *dev_numG;
HANDLE_ERROR(cudaMalloc((void**)&dev_numG, sizeof(int)));
// Num of H lines
int *dev_numH;
HANDLE_ERROR(cudaMalloc((void**)&dev_numH, sizeof(int)));
// Space distribution
unsigned int DIM = 1 + sqrt(index) / 16;
dim3 blocks(DIM, DIM);
dim3 threads(16, 16);
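// DIM x DIM blocks of 16x16 threads: (DIM*16)^2 >= index, so there is at least one thread per constraint line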
(*numG) = (*numH) = 0;
float time_elapsed = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start); //create the event
cudaEventCreate(&stop);
cudaEventRecord(start, 0); //record the current time
cudaEventRecord(stop, 0); //record the current time
cudaEventSynchronize(start); //Waits for an event to complete.
cudaEventSynchronize(stop); //Waits for an event to complete, i.e. all work issued before the Record call
cudaEventElapsedTime(&time_elapsed, start, stop); //compute the time difference
cudaEventDestroy(start); //destroy the event
cudaEventDestroy(stop);
printf("Elapsed time: %f(ms)\n", time_elapsed);
// Copy from CPU to GPU
HANDLE_ERROR(cudaMemcpy(dev_oConstraints, &originalConstraints[0], size, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_object, &object, sizeof(struct Objfunc), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_index, &index, sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_numG, numG, sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_numH, numH, sizeof(int), cudaMemcpyHostToDevice));
// Kernel function <<<blocks, threads>>>
kRotation << <blocks, threads >> >(dev_oConstraints, dev_lines, dev_object, dev_index, dev_numG, dev_numH);
// Copy from GPU to CPU
HANDLE_ERROR(cudaMemcpy(numG, dev_numG, sizeof(int), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(numH, dev_numH, sizeof(int), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(lines, dev_lines, size, cudaMemcpyDeviceToHost));
printf("%d %d\n", (*numG), (*numH));
if ((*numH) + (*numG) != index) {
ret = false;
}
else {
ret = true;
}
return ret;
}
|
ba5cdd6293cdf733192d884d3c470af77edf1109.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by user on 15-11-16.
//
#include "cuMatExecDevice.h"
#include <hiprand/hiprand_kernel.h>
#include <iostream>
using std::cout;
using std::endl;
namespace cuExec{
// template<typename Scalar>
// void cuAlgorithm<Scalar>::setConstant(Scalar *data, const size_t size, const Scalar value) {
// thrust::device_ptr<Scalar> d_ptr(data);
// thrust::fill_n(d_ptr, size, value);
// }
// template<typename Scalar>
// thrust::device_ptr<Scalar> cuAlgorithm<Scalar>::getThrustPtr(Scalar *other) {
// return thrust::device_ptr<Scalar>(other);
// }
// template<typename Scalar>
// Scalar cuAlgorithm<Scalar>::getDeviceConstant(Scalar *dev_data) {
// Scalar result;
// hipMemcpy(&result, dev_data, sizeof(Scalar), hipMemcpyDeviceToHost);
// return result;
// }
//
template<typename Scalar, unsigned int numThreads>
__global__ void setConstantKernel(Scalar * data, const size_t size, const Scalar value){
// printf("Here");
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
unsigned int position = tid +bid*numThreads;
for (;position<size;position+=numThreads*blockDim.x){
data[position]=value;
// printf("Value is %f\n", data[position]);
// printf("Here is the %f\n", data[position]);
}
};
template<typename Scalar, unsigned int numThreads>
__global__ void rnormKernel(Scalar * data, const size_t size, const Scalar mean, const Scalar std){
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
unsigned int position = tid +bid*numThreads;
unsigned int seed = position;
hiprandState_t s;
// seed a random number generator
hiprand_init(seed, 0, 0, &s);
for (;position<size;position+=numThreads*blockDim.x){
// printf("Value is %f\n", mean+std*hiprand_normal(&s));
data[position]=(Scalar)(mean+std*hiprand_normal(&s));
}
};
template<typename Scalar, unsigned int numThreads>
void cuAlgorithm<Scalar, numThreads>::setConstant(hipStream_t stream, unsigned int numBlocks, Scalar *data, const size_t size,
const Scalar value) {
hipLaunchKernelGGL(( setConstantKernel<Scalar, numThreads>), dim3(numBlocks), dim3(numThreads), 0, stream, data, size, value);
// hipDeviceSynchronize();
}
template<typename Scalar, unsigned int numThreads>
void
cuAlgorithm<Scalar, numThreads>::rnorm(hipStream_t stream, unsigned int numBlocks, Scalar *data, const size_t size, const Scalar mean,
const Scalar std) {
hipLaunchKernelGGL(( rnormKernel<Scalar, numThreads>), dim3(numBlocks), dim3(numThreads), 0, stream, data, size, mean, std);
}
template<typename Scalar, unsigned int numThreads>
void
cuAlgorithm<Scalar, numThreads>::rnorm(unsigned int numBlocks, Scalar *data, const size_t size, const Scalar mean, const Scalar std) {
hipLaunchKernelGGL(( rnormKernel<Scalar, numThreads>), dim3(numBlocks), dim3(numThreads), 0, 0, data, size, mean, std);
}
template<typename Scalar, unsigned int numThreads>
void cuAlgorithm<Scalar, numThreads>::setConstant(unsigned int numBlocks, Scalar *data, const size_t size, const Scalar value) {
// cout << "Value is " << value << endl;
// cout << "Number of Blocks is " << numBlocks << endl;
// cout << "Number of Threads is " << numThreads << endl;
hipLaunchKernelGGL(( setConstantKernel<Scalar, numThreads>), dim3(numBlocks), dim3(numThreads), 0, 0, data, size, value);
// hipDeviceSynchronize();
}
}
|
ba5cdd6293cdf733192d884d3c470af77edf1109.cu
|
//
// Created by user on 15-11-16.
//
#include "cuMatExecDevice.h"
#include <curand_kernel.h>
#include <iostream>
using std::cout;
using std::endl;
namespace cuExec{
// template<typename Scalar>
// void cuAlgorithm<Scalar>::setConstant(Scalar *data, const size_t size, const Scalar value) {
// thrust::device_ptr<Scalar> d_ptr(data);
// thrust::fill_n(d_ptr, size, value);
// }
// template<typename Scalar>
// thrust::device_ptr<Scalar> cuAlgorithm<Scalar>::getThrustPtr(Scalar *other) {
// return thrust::device_ptr<Scalar>(other);
// }
// template<typename Scalar>
// Scalar cuAlgorithm<Scalar>::getDeviceConstant(Scalar *dev_data) {
// Scalar result;
// cudaMemcpy(&result, dev_data, sizeof(Scalar), cudaMemcpyDeviceToHost);
// return result;
// }
//
template<typename Scalar, unsigned int numThreads>
__global__ void setConstantKernel(Scalar * data, const size_t size, const Scalar value){
// printf("Here");
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
unsigned int position = tid +bid*numThreads;
for (;position<size;position+=numThreads*blockDim.x){
data[position]=value;
// printf("Value is %f\n", data[position]);
// printf("Here is the %f\n", data[position]);
}
};
template<typename Scalar, unsigned int numThreads>
__global__ void rnormKernel(Scalar * data, const size_t size, const Scalar mean, const Scalar std){
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
unsigned int position = tid +bid*numThreads;
unsigned int seed = position;
curandState s;
// seed a random number generator
curand_init(seed, 0, 0, &s);
for (;position<size;position+=numThreads*blockDim.x){
// printf("Value is %f\n", mean+std*curand_normal(&s));
data[position]=(Scalar)(mean+std*curand_normal(&s));
}
};
template<typename Scalar, unsigned int numThreads>
void cuAlgorithm<Scalar, numThreads>::setConstant(cudaStream_t stream, unsigned int numBlocks, Scalar *data, const size_t size,
const Scalar value) {
setConstantKernel<Scalar, numThreads><<<numBlocks, numThreads, 0, stream>>>(data, size, value);
// cudaDeviceSynchronize();
}
template<typename Scalar, unsigned int numThreads>
void
cuAlgorithm<Scalar, numThreads>::rnorm(cudaStream_t stream, unsigned int numBlocks, Scalar *data, const size_t size, const Scalar mean,
const Scalar std) {
rnormKernel<Scalar, numThreads><<<numBlocks, numThreads, 0, stream>>>(data, size, mean, std);
}
template<typename Scalar, unsigned int numThreads>
void
cuAlgorithm<Scalar, numThreads>::rnorm(unsigned int numBlocks, Scalar *data, const size_t size, const Scalar mean, const Scalar std) {
rnormKernel<Scalar, numThreads><<<numBlocks, numThreads>>>(data, size, mean, std);
}
template<typename Scalar, unsigned int numThreads>
void cuAlgorithm<Scalar, numThreads>::setConstant(unsigned int numBlocks, Scalar *data, const size_t size, const Scalar value) {
// cout << "Value is " << value << endl;
// cout << "Number of Blocks is " << numBlocks << endl;
// cout << "Number of Threads is " << numThreads << endl;
setConstantKernel<Scalar, numThreads><<<numBlocks, numThreads>>>(data, size, value);
// cudaDeviceSynchronize();
}
}
|
a8a41f3e5f408fc8691aa230cb75651b65b212a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cstring>
#include <cstdlib>
#include <iostream>
#include <vtkDoubleArray.h>
#include <vtkFloatArray.h>
#include "gmm_vtk_data_array.h"
#include <core/ndarray.h>
#include <core/thrust_common.h>
using namespace std;
namespace edda{
using namespace dist;
GmmVtkDataArray::~GmmVtkDataArray() { }
size_t GmmVtkDataArray::getLength() { return length; }
int GmmVtkDataArray::getNumComponents() { return this->components; }
GmmVtkDataArray::GmmVtkDataArray(vtkFieldData *fieldData, const char *arrayNamePrefix) {
target_comp = 0;
char meanArrayName[1024];
char stdevArrayName[1024];
char varArrayName[1024];
char weightArrayName[1024];
for (int i=0; i<fieldData->GetNumberOfArrays(); i++)
{
sprintf(meanArrayName, "%smean%d", arrayNamePrefix, i );
sprintf(stdevArrayName, "%sstdev%d", arrayNamePrefix, i );
sprintf(varArrayName, "%svar%d", arrayNamePrefix, i );
sprintf(weightArrayName, "%sweight%d", arrayNamePrefix, i );
vtkSmartPointer<vtkDataArray> meanArray = fieldData->GetArray(meanArrayName);
vtkSmartPointer<vtkDataArray> stdevArray = fieldData->GetArray(stdevArrayName);
vtkSmartPointer<vtkDataArray> varArray = fieldData->GetArray(varArrayName);
vtkSmartPointer<vtkDataArray> weightArray = fieldData->GetArray(weightArrayName);
if (i==0 && meanArray && weightArray==0) {
int c = meanArray->GetNumberOfComponents();
int n = meanArray->GetNumberOfTuples();
// allow the case where only one mean and one variance are provided
// create weight Array
weightArray = vtkSmartPointer<vtkFloatArray>::New();
weightArray->SetNumberOfComponents(c);
weightArray->SetNumberOfTuples(n);
// assign 1's
float *p = (float *)weightArray->GetVoidPointer(0);
for (int j=0; j<n*c; j++)
p[j] = 1.f;
}
if (meanArray && varArray && weightArray) {
// get components
int c = meanArray->GetNumberOfComponents();
if (c != varArray->GetNumberOfComponents() || c!= weightArray->GetNumberOfComponents()) {
printf("Warning: the number of array components do not match\n") ;
c = 1;
}
this->components = c;
// set arrays
arrays.push_back(meanArray);
arrays.push_back(varArray);
arrays.push_back(weightArray);
} else if (meanArray && stdevArray && weightArray) {
// get components
int c = meanArray->GetNumberOfComponents();
if (c != stdevArray->GetNumberOfComponents() || c!= weightArray->GetNumberOfComponents()) {
printf("Warning: the number of array components do not match\n") ;
c = 1;
}
this->components = c;
// convert stdev to variance
arrays.push_back(meanArray);
vtkSmartPointer<vtkFloatArray> varArray = vtkSmartPointer<vtkFloatArray>::New();
int n = stdevArray->GetNumberOfTuples();
varArray->SetNumberOfComponents(1);
varArray->SetNumberOfTuples(n);
for (int j=0; j<n; j++)
*(float *)varArray->GetVoidPointer(j) = pow(stdevArray->GetTuple1(j), 2.) ;
// set arrays
arrays.push_back(varArray);
arrays.push_back(weightArray);
}
}
if (arrays.size() == 0) {
length = 0;
return;
}
length = arrays[0]->GetNumberOfTuples();
for (size_t i=1; i<arrays.size(); i++)
{
length = ::min(length, (size_t)arrays[i]->GetNumberOfTuples());
}
}
GmmVtkDataArray::GmmVtkDataArray(std::vector<vtkSmartPointer<vtkDataArray> > arrays_) {
target_comp = 0;
if (arrays_.size() == 0) {
printf("Warning: no array assigned to GmmVtkArray\n");
return;
}
if (arrays_.size() % 3 != 0) {
printf("Warning: GmmVtkArray: some input arrays are truncated\n");
}
for (size_t i=0; i<arrays_.size()/3; i++) {
this->arrays.push_back(arrays_[i]);
this->arrays.push_back(arrays_[i+1]);
this->arrays.push_back(arrays_[i+2]);
}
length = this->arrays[0]->GetNumberOfTuples();
for (size_t i=0; i<this->arrays.size(); i++)
{
length = min(length, (size_t)this->arrays[i]->GetNumberOfTuples());
}
}
dist::Variant GmmVtkDataArray::getDistr(size_t idx) {
std::vector<GMMTuple> models ( arrays.size()/3 );
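// arrays stores consecutive (mean, variance, weight) triplets, so p[0], p[1], p[2]
// of model i/3 receive the mean, variance and weight of that mixture component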
for (size_t i=0; i<arrays.size(); i++) {
models[i/3].p[i%3] = arrays[i]->GetComponent(idx, 0);
}
return DefaultGaussianMixture(models);
}
std::vector<dist::Variant> GmmVtkDataArray::getDistrVector(size_t idx) {
int components = this->getNumComponents();
std::vector<dist::Variant> v( components );
for (int c = 0; c < components; c++ )
{
std::vector<GMMTuple> models(arrays.size()/3) ;
for (size_t i=0; i<arrays.size(); i++) {
models[i/3].p[i%3] = arrays[i]->GetComponent(idx, c);
}
v[c] = DefaultGaussianMixture(models);
}
return v;
}
Real GmmVtkDataArray::getScalar(size_t idx) {
return getSample(getDistr(idx));
}
std::vector<Real> GmmVtkDataArray::getVector(size_t idx) {
int components = this->getNumComponents();
std::vector<Real> v( components );
for (int c = 0; c < components; c++ )
{
std::vector<GMMTuple> models(arrays.size()/3) ;
for (size_t i=0; i<arrays.size(); i++) {
models[i/3].p[i%3] = arrays[i]->GetComponent(idx, c);
}
v[c] = getSample(DefaultGaussianMixture(models));
}
return v;
}
std::shared_ptr<GmmArray> GmmVtkDataArray::genNdArray() {
//int n= arrays[i]->GetNumberOfTuples();
//NdArray ndarray({length, arrays.size()});
std::vector<NdArray<Real> > data(arrays.size());
int n = length; // array length
for (size_t i=0; i<arrays.size(); i++) {
vtkFloatArray *farray = vtkFloatArray::SafeDownCast( arrays[i].Get() );
vtkDoubleArray *darray = vtkDoubleArray::SafeDownCast( arrays[i].Get() );
// Sometimes the Real type that edda uses differs from the vtk array type
// The following automatically converts vtk Float/Double Array to Real type
if ((farray && sizeof(float) == sizeof(Real)) // alternative: typeid(Real) == typeid(float)
|| (darray && sizeof(double)==sizeof(Real)) ) {
NdArray<Real> ndarray((Real *)arrays[i]->GetVoidPointer(0), {n} );
data[i].take(ndarray);
} else if ( sizeof(float) == sizeof(Real) ){
// create a temp array in float
vtkFloatArray *newArray = vtkFloatArray::New();
newArray->DeepCopy(arrays[i]);
NdArray<Real> ndarray((Real *)newArray->GetVoidPointer(0), {n} );
data[i].take(ndarray);
newArray->Delete();
} else if ( sizeof(double) == sizeof(Real) ) {
// create a temp array in double
vtkDoubleArray *newArray = vtkDoubleArray::New();
newArray->DeepCopy(arrays[i]);
NdArray<Real> ndarray((Real *)newArray->GetVoidPointer(0), {n} );
data[i].take(ndarray);
newArray->Delete();
} else {
throw std::runtime_error("Real type not float or double.");
}
}
return std::shared_ptr<GmmArray> ( new GmmArray(data) );
}
}; //edda
|
a8a41f3e5f408fc8691aa230cb75651b65b212a7.cu
|
#include <algorithm>
#include <cstring>
#include <cstdlib>
#include <iostream>
#include <vtkDoubleArray.h>
#include <vtkFloatArray.h>
#include "gmm_vtk_data_array.h"
#include <core/ndarray.h>
#include <core/thrust_common.h>
using namespace std;
namespace edda{
using namespace dist;
GmmVtkDataArray::~GmmVtkDataArray() { }
size_t GmmVtkDataArray::getLength() { return length; }
int GmmVtkDataArray::getNumComponents() { return this->components; }
GmmVtkDataArray::GmmVtkDataArray(vtkFieldData *fieldData, const char *arrayNamePrefix) {
target_comp = 0;
char meanArrayName[1024];
char stdevArrayName[1024];
char varArrayName[1024];
char weightArrayName[1024];
for (int i=0; i<fieldData->GetNumberOfArrays(); i++)
{
sprintf(meanArrayName, "%smean%d", arrayNamePrefix, i );
sprintf(stdevArrayName, "%sstdev%d", arrayNamePrefix, i );
sprintf(varArrayName, "%svar%d", arrayNamePrefix, i );
sprintf(weightArrayName, "%sweight%d", arrayNamePrefix, i );
vtkSmartPointer<vtkDataArray> meanArray = fieldData->GetArray(meanArrayName);
vtkSmartPointer<vtkDataArray> stdevArray = fieldData->GetArray(stdevArrayName);
vtkSmartPointer<vtkDataArray> varArray = fieldData->GetArray(varArrayName);
vtkSmartPointer<vtkDataArray> weightArray = fieldData->GetArray(weightArrayName);
if (i==0 && meanArray && weightArray==0) {
int c = meanArray->GetNumberOfComponents();
int n = meanArray->GetNumberOfTuples();
// allow the case where only one mean and one variance are provided
// create weight Array
weightArray = vtkSmartPointer<vtkFloatArray>::New();
weightArray->SetNumberOfComponents(c);
weightArray->SetNumberOfTuples(n);
// assign 1's
float *p = (float *)weightArray->GetVoidPointer(0);
for (int j=0; j<n*c; j++)
p[j] = 1.f;
}
if (meanArray && varArray && weightArray) {
// get components
int c = meanArray->GetNumberOfComponents();
if (c != varArray->GetNumberOfComponents() || c!= weightArray->GetNumberOfComponents()) {
printf("Warning: the number of array components do not match\n") ;
c = 1;
}
this->components = c;
// set arrays
arrays.push_back(meanArray);
arrays.push_back(varArray);
arrays.push_back(weightArray);
} else if (meanArray && stdevArray && weightArray) {
// get components
int c = meanArray->GetNumberOfComponents();
if (c != stdevArray->GetNumberOfComponents() || c!= weightArray->GetNumberOfComponents()) {
printf("Warning: the number of array components do not match\n") ;
c = 1;
}
this->components = c;
// convert stdev to variance
arrays.push_back(meanArray);
vtkSmartPointer<vtkFloatArray> varArray = vtkSmartPointer<vtkFloatArray>::New();
int n = stdevArray->GetNumberOfTuples();
varArray->SetNumberOfComponents(1);
varArray->SetNumberOfTuples(n);
for (int j=0; j<n; j++)
*(float *)varArray->GetVoidPointer(j) = pow(stdevArray->GetTuple1(j), 2.) ;
// set arrays
arrays.push_back(varArray);
arrays.push_back(weightArray);
}
}
if (arrays.size() == 0) {
length = 0;
return;
}
length = arrays[0]->GetNumberOfTuples();
for (size_t i=1; i<arrays.size(); i++)
{
length = std::min(length, (size_t)arrays[i]->GetNumberOfTuples());
}
}
GmmVtkDataArray::GmmVtkDataArray(std::vector<vtkSmartPointer<vtkDataArray> > arrays_) {
target_comp = 0;
if (arrays_.size() == 0) {
printf("Warning: no array assigned to GmmVtkArray\n");
return;
}
if (arrays_.size() % 3 != 0) {
printf("Warning: GmmVtkArray: some input arrays are truncated\n");
}
for (size_t i=0; i<arrays_.size()/3; i++) {
this->arrays.push_back(arrays_[i]);
this->arrays.push_back(arrays_[i+1]);
this->arrays.push_back(arrays_[i+2]);
}
length = this->arrays[0]->GetNumberOfTuples();
for (size_t i=0; i<this->arrays.size(); i++)
{
length = min(length, (size_t)this->arrays[i]->GetNumberOfTuples());
}
}
dist::Variant GmmVtkDataArray::getDistr(size_t idx) {
std::vector<GMMTuple> models ( arrays.size()/3 );
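// arrays stores consecutive (mean, variance, weight) triplets, so p[0], p[1], p[2]
// of model i/3 receive the mean, variance and weight of that mixture component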
for (size_t i=0; i<arrays.size(); i++) {
models[i/3].p[i%3] = arrays[i]->GetComponent(idx, 0);
}
return DefaultGaussianMixture(models);
}
std::vector<dist::Variant> GmmVtkDataArray::getDistrVector(size_t idx) {
int components = this->getNumComponents();
std::vector<dist::Variant> v( components );
for (int c = 0; c < components; c++ )
{
std::vector<GMMTuple> models(arrays.size()/3) ;
for (size_t i=0; i<arrays.size(); i++) {
models[i/3].p[i%3] = arrays[i]->GetComponent(idx, c);
}
v[c] = DefaultGaussianMixture(models);
}
return v;
}
Real GmmVtkDataArray::getScalar(size_t idx) {
return getSample(getDistr(idx));
}
std::vector<Real> GmmVtkDataArray::getVector(size_t idx) {
int components = this->getNumComponents();
std::vector<Real> v( components );
for (int c = 0; c < components; c++ )
{
std::vector<GMMTuple> models(arrays.size()/3) ;
for (size_t i=0; i<arrays.size(); i++) {
models[i/3].p[i%3] = arrays[i]->GetComponent(idx, c);
}
v[c] = getSample(DefaultGaussianMixture(models));
}
return v;
}
std::shared_ptr<GmmArray> GmmVtkDataArray::genNdArray() {
//int n= arrays[i]->GetNumberOfTuples();
//NdArray ndarray({length, arrays.size()});
std::vector<NdArray<Real> > data(arrays.size());
int n = length; // array length
for (size_t i=0; i<arrays.size(); i++) {
vtkFloatArray *farray = vtkFloatArray::SafeDownCast( arrays[i].Get() );
vtkDoubleArray *darray = vtkDoubleArray::SafeDownCast( arrays[i].Get() );
// Sometimes the Real type that edda uses differs from the vtk array type
// The following automatically converts vtk Float/Double Array to Real type
if ((farray && sizeof(float) == sizeof(Real)) // alternative: typeid(Real) == typeid(float)
|| (darray && sizeof(double)==sizeof(Real)) ) {
NdArray<Real> ndarray((Real *)arrays[i]->GetVoidPointer(0), {n} );
data[i].take(ndarray);
} else if ( sizeof(float) == sizeof(Real) ){
// create a temp array in float
vtkFloatArray *newArray = vtkFloatArray::New();
newArray->DeepCopy(arrays[i]);
NdArray<Real> ndarray((Real *)newArray->GetVoidPointer(0), {n} );
data[i].take(ndarray);
newArray->Delete();
} else if ( sizeof(double) == sizeof(Real) ) {
// create a temp array in double
vtkDoubleArray *newArray = vtkDoubleArray::New();
newArray->DeepCopy(arrays[i]);
NdArray<Real> ndarray((Real *)newArray->GetVoidPointer(0), {n} );
data[i].take(ndarray);
newArray->Delete();
} else {
throw std::runtime_error("Real type not float or double.");
}
}
return std::shared_ptr<GmmArray> ( new GmmArray(data) );
}
}; //edda
|